   33 #include "config_components.h" 
   88                                  ht[i].bits, ht[i].values,
   89                                  ht[i].class == 1, s->avctx);
   93         if (ht[i].class < 2) {
   94             memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
   96             memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
   97                    ht[i].values, ht[i].length);
 
  107     if (len > 14 && buf[12] == 1)
  108         s->interlace_polarity = 1;
  109     if (len > 14 && buf[12] == 2)
  110         s->interlace_polarity = 0;
 
  121                          s->idsp.idct_permutation);
  129     if (!s->picture_ptr) {
  133         s->picture_ptr = s->picture;
  143     s->first_picture = 1;
  153     if (s->extern_huff) {
  159                    "error using external huffman table, switching back to internal\n");
  165         s->interlace_polarity = 1;
  169             s->interlace_polarity = 1;
  176         if (s->smv_frames_per_jpeg <= 0) {
 
  220         for (i = 0; i < 64; i++) {
  222             if (s->quant_matrixes[index][i] == 0) {
  224                 av_log(s->avctx, log_level, "dqt: 0 quant value\n");
  232                                  s->quant_matrixes[index][8]) >> 1;
  235         len -= 1 + 64 * (1+pr);
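For orientation, a DQT segment stores one byte with a 4-bit precision flag and a 4-bit table id, followed by 64 quantizer values of 8 or 16 bits each in zigzag order; the bookkeeping `len -= 1 + 64 * (1+pr)` above follows directly from that layout. Below is a minimal, self-contained sketch of that parse over a plain byte buffer rather than FFmpeg's GetBitContext; the function name and the rejection of zero quantizers are illustrative assumptions, not the decoder's exact error handling.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper: parse one DQT payload (marker and length already consumed).
 * Returns bytes consumed, or -1 on error. */
static ptrdiff_t parse_dqt_segment(const uint8_t *p, size_t len, uint16_t quant[4][64])
{
    const uint8_t *start = p;
    while (len >= 1) {
        int pr    = p[0] >> 4;            /* 0: 8-bit entries, 1: 16-bit entries */
        int index = p[0] & 15;            /* table id, must be 0..3 */
        size_t need = 1 + 64 * (size_t)(1 + pr);
        if (pr > 1 || index > 3 || len < need)
            return -1;
        p++;
        for (int i = 0; i < 64; i++) {
            uint16_t v = pr ? (uint16_t)((p[0] << 8) | p[1]) : p[0];
            if (v == 0)
                return -1;                /* a zero quantizer would be unusable later */
            quant[index][i] = v;          /* stored in zigzag order, as in the bitstream */
            p += 1 + pr;
        }
        len -= need;
    }
    return p - start;
}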
 
  244     uint8_t bits_table[17];
  245     uint8_t val_table[256];
  265         for (i = 1; i <= 16; i++) {
  270         if (len < n || n > 256)
  273         for (i = 0; i < n; i++) {
  284                                       val_table, class > 0, s->avctx)) < 0)
  290                                           val_table, 0, s->avctx)) < 0)
  294         for (i = 0; i < 16; i++)
  295             s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
  297             s->raw_huffman_values[class][index][i] = val_table[i];
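As a reminder of what `bits_table` and `val_table` hold: a DHT table starts with one byte giving the table class (DC or AC) and index, then 16 counts of how many codes exist for each length 1..16, then that many symbol values in code order. A hedged sketch of that layout over a plain byte buffer (the helper name is hypothetical; error handling is reduced to the two checks mirrored from the code above):

#include <stdint.h>
#include <string.h>

/* Hypothetical: read the per-length counts and symbol values of one DHT table. */
static int read_dht_table(const uint8_t *p, int len,
                          uint8_t bits_table[17], uint8_t val_table[256],
                          int *class, int *index)
{
    int n = 0;
    if (len < 17)
        return -1;
    *class = p[0] >> 4;              /* 0 = DC table, 1 = AC table */
    *index = p[0] & 15;
    bits_table[0] = 0;
    for (int i = 1; i <= 16; i++) {
        bits_table[i] = p[i];        /* number of codes of length i */
        n += p[i];
    }
    if (n > 256 || len < 17 + n)
        return -1;
    memcpy(val_table, p + 17, n);    /* symbols, shortest codes first */
    return 17 + n;                   /* bytes consumed */
}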
 
  310     memset(s->upscale_h, 0, sizeof(s->upscale_h));
  311     memset(s->upscale_v, 0, sizeof(s->upscale_v));
  321     if (s->avctx->bits_per_raw_sample != bits) {
  323         s->avctx->bits_per_raw_sample = bits;
  328     if (bits == 9 && !s->pegasus_rct)
  331     if(s->lossless && s->avctx->lowres){
  340     if (s->interlaced && s->width == width && s->height == height + 1)
  346     if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
 
  350     if (nb_components <= 0 ||
  353     if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
  354         if (nb_components != s->nb_components) {
  356                    "nb_components changing in interlaced picture\n");
  360     if (s->ls && !(bits <= 8 || nb_components == 1)) {
  362                                       "JPEG-LS that is not <= 8 "
  363                                       "bits/component or 16-bit gray");
  366     if (len != 8 + 3 * nb_components) {
  367         av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
 
  371     s->nb_components = nb_components;
  374     for (i = 0; i < nb_components; i++) {
  380         if (h_count[i] > s->h_max)
  381             s->h_max = h_count[i];
  382         if (v_count[i] > s->v_max)
  383             s->v_max = v_count[i];
  385         if (s->quant_index[i] >= 4) {
  389         if (!h_count[i] || !v_count[i]) {
  391                    "Invalid sampling factor in component %d %d:%d\n",
  392                    i, h_count[i], v_count[i]);
  397                i, h_count[i], v_count[i],
  398                s->component_id[i], s->quant_index[i]);
 
  400     if (   nb_components == 4
  401         && s->component_id[0] == 'C'
  402         && s->component_id[1] == 'M'
  403         && s->component_id[2] == 'Y'
  404         && s->component_id[3] == 'K')
  405         s->adobe_transform = 0;
  407     if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
  413         if (nb_components == 2) {
  427         memcmp(s->h_count, h_count, sizeof(h_count))                ||
  428         memcmp(s->v_count, v_count, sizeof(v_count))) {
 
  434         memcpy(s->h_count, h_count, sizeof(h_count));
  435         memcpy(s->v_count, v_count, sizeof(v_count));
  440         if (s->first_picture   &&
  441             (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
  442             s->orig_height != 0 &&
  443             s->height < ((s->orig_height * 3) / 4)) {
  445             s->bottom_field                  = s->interlace_polarity;
  456             (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
  457              s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
  461         s->first_picture = 0;
  467         s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
  468         if (s->avctx->height <= 0)
 
  471     if (s->bayer && s->progressive) {
  476     if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
  477         if (s->progressive) {
  482         if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
  484         else if (!s->lossless)
 
  487         pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
  488                      (s->h_count[1] << 20) | (s->v_count[1] << 16) |
  489                      (s->h_count[2] << 12) | (s->v_count[2] <<  8) |
  490                      (s->h_count[3] <<  4) |  s->v_count[3];
  494         if (!(pix_fmt_id & 0xD0D0D0D0))
  495             pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
  496         if (!(pix_fmt_id & 0x0D0D0D0D))
  497             pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
  499         for (i = 0; i < 8; i++) {
  500             int j = 6 + (i&1) - (i&6);
  501             int is = (pix_fmt_id >> (4*i)) & 0xF;
  502             int js = (pix_fmt_id >> (4*j)) & 0xF;
  504             if (is == 1 && js != 2 && (i < 2 || i > 5))
  505                 js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
  506             if (is == 1 && js != 2 && (i < 2 || i > 5))
  507                 js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
  509             if (is == 1 && js == 2) {
  510                 if (i & 1) s->upscale_h[j/2] = 1;
  511                 else       s->upscale_v[j/2] = 1;
  516             if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
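To make the hex constants in the switch below easier to read: each of up to four components contributes one nibble of horizontal and one of vertical sampling factor, packed from component 0 in the top byte down to component 3 in the bottom byte, so 0x22111100 is plain 4:2:0. A standalone sketch that rebuilds the same id (illustrative helper, not FFmpeg API):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: pack JPEG sampling factors the way mjpegdec builds pix_fmt_id. */
static uint32_t make_pix_fmt_id(const int h[4], const int v[4])
{
    return ((uint32_t)h[0] << 28) | ((uint32_t)v[0] << 24) |
           ((uint32_t)h[1] << 20) | ((uint32_t)v[1] << 16) |
           ((uint32_t)h[2] << 12) | ((uint32_t)v[2] <<  8) |
           ((uint32_t)h[3] <<  4) |  (uint32_t)v[3];
}

int main(void)
{
    /* Classic 4:2:0: luma sampled 2x2, both chroma planes 1x1, no 4th component. */
    int h[4] = { 2, 1, 1, 0 }, v[4] = { 2, 1, 1, 0 };
    printf("0x%08X\n", (unsigned)make_pix_fmt_id(h, v));   /* prints 0x22111100 */
    return 0;
}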
 
  520         switch (pix_fmt_id) {
  530                 if (   s->adobe_transform == 0
  531                     || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
  545                 if (s->adobe_transform == 0 && s->bits <= 8) {
  557             if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
  568             if (s->adobe_transform == 0 && s->bits <= 8) {
  570                 s->upscale_v[1] = s->upscale_v[2] = 1;
  571                 s->upscale_h[1] = s->upscale_h[2] = 1;
  572             } else if (s->adobe_transform == 2 && s->bits <= 8) {
  574                 s->upscale_v[1] = s->upscale_v[2] = 1;
  575                 s->upscale_h[1] = s->upscale_h[2] = 1;
 
  594             if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
  595                     s->component_id[1] == 'G' && s->component_id[2] == 'B') {
  621             if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
  625                 s->upscale_v[1] = s->upscale_v[2] = 1;
  627                 if (pix_fmt_id == 0x14111100)
  628                     s->upscale_v[1] = s->upscale_v[2] = 1;
  636             if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
  640                 s->upscale_h[1] = s->upscale_h[2] = 1;
  650             if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
  654             s->upscale_h[0] = s->upscale_h[2] = 2;
  661             s->upscale_h[1] = s->upscale_h[2] = 2;
 
  678             if (pix_fmt_id == 0x42111100) {
  681                 s->upscale_h[1] = s->upscale_h[2] = 1;
  682             } else if (pix_fmt_id == 0x24111100) {
  685                 s->upscale_v[1] = s->upscale_v[2] = 1;
  686             } else if (pix_fmt_id == 0x23111100) {
  689                 s->upscale_v[1] = s->upscale_v[2] = 2;
  701             memset(s->upscale_h, 0, sizeof(s->upscale_h));
  702             memset(s->upscale_v, 0, sizeof(s->upscale_v));
  710             memset(s->upscale_h, 0, sizeof(s->upscale_h));
  711             memset(s->upscale_v, 0, sizeof(s->upscale_v));
  712             if (s->nb_components == 3) {
  714             } else if (s->nb_components != 1) {
  717             } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
  719             else if (s->bits <= 8)
  731         if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
  732             s->avctx->pix_fmt = s->hwaccel_pix_fmt;
 
  735 #if CONFIG_MJPEG_NVDEC_HWACCEL
  738 #if CONFIG_MJPEG_VAAPI_HWACCEL
  745             if (s->hwaccel_pix_fmt < 0)
  748             s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
  749             s->avctx->pix_fmt     = s->hwaccel_pix_fmt;
  768             memset(s->picture_ptr->data[1], 0, 1024);
  770         for (i = 0; i < 4; i++)
  771             s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
  773         ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
  774                 s->width, s->height, s->linesize[0], s->linesize[1],
  775                 s->interlaced, s->avctx->height);
 
  779     if ((s->rgb && !s->lossless && !s->ls) ||
  780         (!s->rgb && s->ls && s->nb_components > 1) ||
  788     if (s->progressive) {
  789         int bw = (width  + s->h_max * 8 - 1) / (s->h_max * 8);
  790         int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
  791         for (i = 0; i < s->nb_components; i++) {
  792             int size = bw * bh * s->h_count[i] * s->v_count[i];
  797             if (!s->blocks[i] || !s->last_nnz[i])
  799             s->block_stride[i] = bw * s->h_count[i];
  801         memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
  804     if (s->avctx->hwaccel) {
  806         s->hwaccel_picture_private =
  808         if (!s->hwaccel_picture_private)
  812                                    s->raw_image_buffer_size);
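For the progressive path above, the coefficient buffers are sized from the MCU grid: bw by bh MCUs, each holding h_count x v_count blocks of 64 coefficients per component. A hedged, standalone illustration of that arithmetic (names are ad hoc, not the decoder's):

#include <stddef.h>
#include <stdio.h>

/* Illustrative: number of int16_t coefficients buffered for one component of a
 * progressive JPEG, mirroring the bw/bh/size computation above. */
static size_t progressive_coef_count(int width, int height,
                                     int h_max, int v_max, int h_count, int v_count)
{
    int bw = (width  + h_max * 8 - 1) / (h_max * 8);   /* MCU columns */
    int bh = (height + v_max * 8 - 1) / (v_max * 8);   /* MCU rows */
    size_t blocks = (size_t)bw * bh * h_count * v_count;
    return blocks * 64;                                /* 64 coefficients per 8x8 block */
}

int main(void)
{
    /* 1920x1080 4:2:0: luma has 2x2 blocks per MCU, each chroma plane 1x1. */
    printf("luma coeffs:   %zu\n", progressive_coef_count(1920, 1080, 2, 2, 2, 2));
    printf("chroma coeffs: %zu\n", progressive_coef_count(1920, 1080, 2, 2, 1, 1));
    return 0;
}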
 
  824     if (code < 0 || code > 16) {
  826                "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
  827                0, dc_index, &s->vlcs[0][dc_index]);
  839                         int dc_index, int ac_index, uint16_t *quant_matrix)
  845     if (val == 0xfffff) {
  849     val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
  850     s->last_dc[component] = val;
  859         i += ((unsigned)code) >> 4;
  867                 int sign  = (~cache) >> 31;
  877             j        = s->permutated_scantable[i];
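The DC coefficient is coded as a difference from the previous block's DC of the same component, so `s->last_dc[]` carries the prediction across blocks and lines 849-850 dequantize the difference and update the predictor. A minimal sketch of that DPCM step, with a hypothetical `read_dc_diff` callback standing in for the Huffman decode:

#include <stdint.h>

/* Hypothetical bit-reader callback: returns the decoded DC difference. */
typedef int (*read_dc_diff_fn)(void *ctx, int dc_index);

/* Illustrative DC reconstruction: dequantize the difference, add the prediction,
 * and remember the result for the next block of this component. */
static int decode_dc_sample(void *ctx, read_dc_diff_fn read_dc_diff,
                            int dc_index, int component,
                            const uint16_t *quant_matrix, int last_dc[4])
{
    int diff = read_dc_diff(ctx, dc_index);                  /* magnitude class + extra bits */
    int val  = diff * (int)quant_matrix[0] + last_dc[component];
    last_dc[component] = val;                                /* predictor for the next block */
    return val;
}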
 
  887                                  int component, int dc_index,
  888                                  uint16_t *quant_matrix, int Al)
  891     s->bdsp.clear_block(block);
  893     if (val == 0xfffff) {
  897     val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
  898     s->last_dc[component] = val;
  905                                     uint8_t *last_nnz, int ac_index,
  906                                     uint16_t *quant_matrix,
  907                                     int ss, int se, int Al, int *EOBRUN)
 
  919         for (i = ss; ; i++) {
  932                     int sign  = (~cache) >> 31;
  940                         j = s->permutated_scantable[se];
  947                 j = s->permutated_scantable[i];
  977 #define REFINE_BIT(j) {                                             \
  978     UPDATE_CACHE(re, &s->gb);                                       \
  979     sign = block[j] >> 15;                                          \
  980     block[j] += SHOW_UBITS(re, &s->gb, 1) *                         \
  981                 ((quant_matrix[i] ^ sign) - sign) << Al;            \
  982     LAST_SKIP_BITS(re, &s->gb, 1);                                  \
  990             av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
  995     j = s->permutated_scantable[i];                                 \
  998     else if (run-- == 0)                                            \
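REFINE_BIT implements the correction pass of successive approximation: for a coefficient that is already nonzero, one bit from the scan says whether to push its magnitude up by one quantizer step at the current shift Al, always away from zero. A hedged scalar restatement of that rule outside the bit-reader macros:

#include <stdint.h>

/* Illustrative successive-approximation refinement of one nonzero coefficient.
 * 'bit' is the next bit from the refinement scan, 'Al' the approximation shift. */
static inline int16_t refine_coeff(int16_t coeff, int bit, int quant, int Al)
{
    if (bit) {
        int step = quant << Al;                  /* one quantizer step at this precision */
        coeff += (coeff >= 0) ? step : -step;    /* move away from zero, never across it */
    }
    return coeff;
}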
 1005                                    int ac_index, uint16_t *quant_matrix,
 1006                                    int ss, int se, int Al, int *EOBRUN)
 1009     int last    = FFMIN(se, *last_nnz);
 1017             GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
 1025                 j = s->permutated_scantable[i];
 1056     for (; i <= last; i++) {
 1057         j = s->permutated_scantable[i];
 
 1073     if (s->restart_interval) {
 1077             for (i = 0; i < nb_components; i++)
 1078                 s->last_dc[i] = (4 << s->bits);
 1083         if (s->restart_count == 0) {
 1091                     for (i = 0; i < nb_components; i++)
 1092                         s->last_dc[i] = (4 << s->bits);
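A restart interval resets the DPCM state: at each restart the DC predictors go back to the decoder's neutral value `4 << bits` and the counter of MCUs until the next expected restart is reloaded. A small sketch of that reset, assuming the marker byte has already been extracted from the stream:

/* Illustrative restart handling: reset DC predictors and the restart counter
 * when an RSTn marker (0xD0..0xD7) is seen between MCUs. */
static void handle_restart(int marker, int nb_components, int bits,
                           int last_dc[4], int *restart_count, int restart_interval)
{
    if (marker >= 0xD0 && marker <= 0xD7) {
        for (int i = 0; i < nb_components; i++)
            last_dc[i] = 4 << bits;         /* same neutral predictor as the code above */
        *restart_count = restart_interval;  /* MCUs until the next expected restart */
    }
}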
 
 1108     int left[4], top[4], topleft[4];
 1109     const int linesize = s->linesize[0];
 1110     const int mask     = ((1 << s->bits) - 1) << point_transform;
 1111     int resync_mb_y = 0;
 1112     int resync_mb_x = 0;
 1115     if (!s->bayer && s->nb_components < 3)
 1117     if (s->bayer && s->nb_components > 2)
 1119     if (s->nb_components <= 0 || s->nb_components > 4)
 1121     if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
 1124         if (s->rct || s->pegasus_rct)
 1129     s->restart_count = s->restart_interval;
 1131     if (s->restart_interval == 0)
 1132         s->restart_interval = INT_MAX;
 1135         width = s->mb_width / nb_components;
 1140     if (!s->ljpeg_buffer)
 1145     for (i = 0; i < 4; i++)
 
 1148     for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
 1149         uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
 1151         if (s->interlaced && s->bottom_field)
 1152             ptr += linesize >> 1;
 1154         for (i = 0; i < 4; i++)
 1157         if ((mb_y * s->width) % s->restart_interval == 0) {
 1158             for (i = 0; i < 6; i++)
 1159                 vpred[i] = 1 << (s->bits-1);
 1162         for (mb_x = 0; mb_x < width; mb_x++) {
 1170             if (s->restart_interval && !s->restart_count){
 1171                 s->restart_count = s->restart_interval;
 1175                     top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
 1177             if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
 1178                 modified_predictor = 1;
 1180             for (i=0;i<nb_components;i++) {
 1183                 topleft[i] = top[i];
 1190                 if (!s->bayer || mb_x) {
 1200                     mask & (pred + (unsigned)(dc * (1 << point_transform)));
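The lossless path reconstructs each sample from one of the standard JPEG predictors built from the left, top and top-left neighbours, with the predictor forced to mode 1 right after a resync point (the `modified_predictor` logic above). A sketch of the predictor table as defined in ITU-T T.81; the function name is illustrative:

/* Illustrative JPEG lossless predictors (ITU-T T.81, selection values 1..7).
 * a = left neighbour, b = above, c = above-left. */
static int lossless_predict(int mode, int a, int b, int c)
{
    switch (mode) {
    case 1:  return a;
    case 2:  return b;
    case 3:  return c;
    case 4:  return a + b - c;
    case 5:  return a + ((b - c) >> 1);
    case 6:  return b + ((a - c) >> 1);
    case 7:  return (a + b) >> 1;
    default: return a;   /* fallback here; the spec's selection 0 means "no prediction" */
    }
}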
 
 1203             if (s->restart_interval && !--s->restart_count) {
 1208         if (s->rct && s->nb_components == 4) {
 1209             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
 1210                 ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
 1211                 ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
 1212                 ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
 1213                 ptr[4*mb_x + 0] = buffer[mb_x][3];
 1215         } else if (s->nb_components == 4) {
 1216             for(i=0; i<nb_components; i++) {
 1217                 int c= s->comp_index[i];
 1219                     for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
 1222                 } else if(s->bits == 9) {
 1225                     for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
 1226                         ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
 
 1230         } else if (s->rct) {
 1231             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
 1232                 ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
 1233                 ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
 1234                 ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
 1236         } else if (s->pegasus_rct) {
 1237             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
 1239                 ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
 1240                 ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
 1242         } else if (s->bayer) {
 1245             if (nb_components == 1) {
 1247                 for (mb_x = 0; mb_x < width; mb_x++)
 1248                     ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
 1249             } else if (nb_components == 2) {
 1250                 for (mb_x = 0; mb_x < width; mb_x++) {
 1251                     ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
 1252                     ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
 1256             for(i=0; i<nb_components; i++) {
 1257                 int c= s->comp_index[i];
 1259                     for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
 1262                 } else if(s->bits == 9) {
 1265                     for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
 1266                         ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
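The `s->rct` branches above undo a reversible colour transform: green is recovered by subtracting the rounded average of the two stored colour differences (the `- 0x200` term is the fixed bias those differences carry in this decoder), and red and blue follow by adding green back. A hedged sketch of the underlying transform pair without that bias, to show why the inverse is exact:

/* Illustrative reversible colour transform (RCT) as used by lossless JPEG
 * variants: forward stores (Y, R-G, B-G); the inverse recovers G first. */
static void rct_forward(int r, int g, int b, int *y, int *dr, int *db)
{
    *dr = r - g;
    *db = b - g;
    *y  = g + ((*dr + *db) >> 2);   /* integer average term, so the pair is lossless */
}

static void rct_inverse(int y, int dr, int db, int *r, int *g, int *b)
{
    *g = y - ((dr + db) >> 2);      /* same shift as the "- 0x200) >> 2" lines above */
    *r = dr + *g;
    *b = db + *g;
}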
 
 1276                                  int point_transform, int nb_components)
 1278     int i, mb_x, mb_y, mask;
 1279     int bits= (s->bits+7)&~7;
 1280     int resync_mb_y = 0;
 1281     int resync_mb_x = 0;
 1283     point_transform += bits - s->bits;
 1284     mask = ((1 << s->bits) - 1) << point_transform;
 1286     av_assert0(nb_components>=1 && nb_components<=4);
 
 1288     for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
 1289         for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
 1294             if (s->restart_interval && !s->restart_count){
 1295                 s->restart_count = s->restart_interval;
 1300             if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
 1301                 int toprow  = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
 1302                 int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
 1303                 for (i = 0; i < nb_components; i++) {
 1306                     int n, h, v, x, y, c, j, linesize;
 1307                     n = s->nb_blocks[i];
 1308                     c = s->comp_index[i];
 1313                     linesize= s->linesize[c];
 1315                     if(bits>8) linesize /= 2;
 
 1317                     for(j=0; j<n; j++) {
 1323                         if (   h * mb_x + x >= s->width
 1324                             || v * mb_y + y >= s->height) {
 1326                         } else if (bits<=8) {
 1327                             ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x);
 1329                                 if(x==0 && leftcol){
 1335                                 if(x==0 && leftcol){
 1336                                     pred= ptr[-linesize];
 1342                             if (s->interlaced && s->bottom_field)
 1343                                 ptr += linesize >> 1;
 1345                             *ptr= pred + ((unsigned)dc << point_transform);
 1347                             ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x));
 1349                                 if(x==0 && leftcol){
 1355                                 if(x==0 && leftcol){
 1356                                     pred= ptr16[-linesize];
 
 1374                 for (i = 0; i < nb_components; i++) {
 1377                     int n, h, v, x, y, c, j, linesize, dc;
 1378                     n        = s->nb_blocks[i];
 1379                     c        = s->comp_index[i];
 1384                     linesize = s->linesize[c];
 1386                     if(bits>8) linesize /= 2;
 1388                     for (j = 0; j < n; j++) {
 1394                         if (   h * mb_x + x >= s->width
 1395                             || v * mb_y + y >= s->height) {
 1397                         } else if (bits<=8) {
 1398                             ptr = s->picture_ptr->data[c] +
 1399                               (linesize * (v * mb_y + y)) +
 1404                             *ptr = pred + ((unsigned)dc << point_transform);
 1406                             ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x));
 1410                             *ptr16= pred + ((unsigned)dc << point_transform);
 1420             if (s->restart_interval && !--s->restart_count) {
 
 1430                                               uint8_t *dst, const uint8_t *src,
 1431                                               int linesize, int lowres)
 1434     case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
 1447     int block_x, block_y;
 1448     int size = 8 >> s->avctx->lowres;
 1450         for (block_y=0; block_y<size; block_y++)
 1451             for (block_x=0; block_x<size; block_x++)
 1452                 *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
 1454         for (block_y=0; block_y<size; block_y++)
 1455             for (block_x=0; block_x<size; block_x++)
 1456                 *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
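shift_output widens samples stored at the coded bit depth to the full range of the output pixel format by a plain left shift, e.g. 12-bit samples in a 16-bit plane are shifted by 4. A one-row illustration of the same idea:

#include <stdint.h>
#include <stddef.h>

/* Illustrative: widen one row of bits-deep samples into a 16-bit plane,
 * the same "<<= 16 - bits" operation as shift_output above. */
static void widen_row_to_16(uint16_t *row, size_t width, int bits)
{
    int shift = 16 - bits;          /* e.g. 4 for 12-bit JPEG in a 16-bit format */
    for (size_t x = 0; x < width; x++)
        row[x] <<= shift;
}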
 
 1461                              int Al, const uint8_t *mb_bitmask,
 1462                              int mb_bitmask_size,
 1465     int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
 1470     int bytes_per_pixel = 1 + (s->bits > 8);
 1473         if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
 1477         init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
 1480     s->restart_count = 0;
 1487     for (i = 0; i < nb_components; i++) {
 1488         int c   = s->comp_index[i];
 1489         data[c] = s->picture_ptr->data[c];
 1490         reference_data[c] = reference ? reference->data[c] : NULL;
 1491         linesize[c] = s->linesize[c];
 1492         s->coefs_finished[c] |= 1;
 
 1495     for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
 1496         for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
 1499             if (s->restart_interval && !s->restart_count)
 1500                 s->restart_count = s->restart_interval;
 1507             for (i = 0; i < nb_components; i++) {
 1509                 int n, h, v, x, y, c, j;
 1511                 n = s->nb_blocks[i];
 1512                 c = s->comp_index[i];
 1517                 for (j = 0; j < n; j++) {
 1518                     block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
 1519                                      (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
 1521                     if (s->interlaced && s->bottom_field)
 1522                         block_offset += linesize[c] >> 1;
 1523                     if (   8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width  : s->width)
 1524                         && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
 1525                         ptr = data[c] + block_offset;
 1528                     if (!s->progressive) {
 1532                                                 linesize[c], s->avctx->lowres);
 1535                             s->bdsp.clear_block(s->block);
 1537                                              s->dc_index[i], s->ac_index[i],
 1538                                              s->quant_matrixes[s->quant_sindex[i]]) < 0) {
 1540                                        "error y=%d x=%d\n", mb_y, mb_x);
 1543                             if (ptr && linesize[c]) {
 1544                                 s->idsp.idct_put(ptr, linesize[c], s->block);
 1550                         int block_idx  = s->block_stride[c] * (v * mb_y + y) +
 1552                         int16_t *block = s->blocks[c][block_idx];
 1555                                         s->quant_matrixes[s->quant_sindex[i]][0] << Al;
 1557                                                        s->quant_matrixes[s->quant_sindex[i]],
 1560                                    "error y=%d x=%d\n", mb_y, mb_x);
 1564                     ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
 1565                     ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
 1566                             mb_x, mb_y, x, y, c, s->bottom_field,
 1567                             (v * mb_y + y) * 8, (h * mb_x + x) * 8);
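The nested mb_y/mb_x/i/j loops above walk the scan in MCU order: within one MCU, component i contributes h x v blocks, and the decoder advances x and wraps it into y so the j-th block lands at block column mb_x*h + x and block row mb_y*v + y. A hedged sketch of that same mapping written directly in terms of j:

#include <stdio.h>

/* Illustrative: map (MCU position, block index within the MCU) to absolute
 * 8x8-block coordinates for a component with sampling factors h x v. */
static void block_position(int mb_x, int mb_y, int j, int h, int v,
                           int *block_col, int *block_row)
{
    int x = j % h;                  /* horizontal block index inside the MCU */
    int y = j / h;                  /* vertical block index inside the MCU */
    *block_col = mb_x * h + x;
    *block_row = mb_y * v + y;
}

int main(void)
{
    /* 4:2:0 luma (h = v = 2): MCU (3, 1) holds four blocks. */
    for (int j = 0; j < 4; j++) {
        int bc, br;
        block_position(3, 1, j, 2, 2, &bc, &br);
        printf("block %d -> col %d, row %d\n", j, bc, br);
    }
    return 0;
}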
 
 1582                                             int se, int Ah, int Al)
 1586     int c = s->comp_index[0];
 1587     uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
 1590     if (se < ss || se > 63) {
 1597     s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
 1599     s->restart_count = 0;
 1601     for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
 1602         int block_idx    = mb_y * s->block_stride[c];
 1603         int16_t (*block)[64] = &s->blocks[c][block_idx];
 1604         uint8_t *last_nnz    = &s->last_nnz[c][block_idx];
 1606             av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
 1609         for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
 1611                 if (s->restart_interval && !s->restart_count)
 1612                     s->restart_count = s->restart_interval;
 1616                                                   quant_matrix, ss, se, Al, &EOBRUN);
 1619                                                    quant_matrix, ss, se, Al, &EOBRUN);
 1625                            "error y=%d x=%d\n", mb_y, mb_x);
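`s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss)` marks spectral coefficients ss..se of component c as covered by this scan; the IDCT pass later renders a component only once every bit is set. A small sketch of that bookkeeping (the wrap-around of `2ULL << 63` to 0 is well defined for unsigned types, which is what makes the se = 63 case work):

#include <stdint.h>
#include <stdio.h>

/* Illustrative: one bit per spectral coefficient, set for the range ss..se,
 * exactly the (2ULL << se) - (1ULL << ss) mask used above. */
static uint64_t mark_spectral_range(uint64_t finished, int ss, int se)
{
    return finished | ((2ULL << se) - (1ULL << ss));
}

int main(void)
{
    uint64_t done = 0;
    done = mark_spectral_range(done, 0, 0);     /* DC scan */
    done = mark_spectral_range(done, 1, 63);    /* one AC scan covering the rest */
    printf("all 64 coefficients seen: %s\n", ~done == 0 ? "yes" : "no");
    return 0;
}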
 
 1640     const int bytes_per_pixel = 1 + (s->bits > 8);
 1641     const int block_size = s->lossless ? 1 : 8;
 1643     for (c = 0; c < s->nb_components; c++) {
 1644         uint8_t *data = s->picture_ptr->data[c];
 1645         int linesize  = s->linesize[c];
 1646         int h = s->h_max / s->h_count[c];
 1647         int v = s->v_max / s->v_count[c];
 1648         int mb_width     = (s->width  + h * block_size - 1) / (h * block_size);
 1649         int mb_height    = (s->height + v * block_size - 1) / (v * block_size);
 1651         if (~s->coefs_finished[c])
 1654         if (s->interlaced && s->bottom_field)
 1655             data += linesize >> 1;
 1657         for (mb_y = 0; mb_y < mb_height; mb_y++) {
 1658             uint8_t *ptr     = data + (mb_y * linesize * 8 >> s->avctx->lowres);
 1659             int block_idx    = mb_y * s->block_stride[c];
 1660             int16_t (*block)[64] = &s->blocks[c][block_idx];
 1661             for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
 1662                 s->idsp.idct_put(ptr, linesize, *block);
 1665                 ptr += bytes_per_pixel*8 >> s->avctx->lowres;
 
 1672                         int mb_bitmask_size, const AVFrame *reference)
 1676     const int block_size = s->lossless ? 1 : 8;
 1677     int ilv, prev_shift;
 1679     if (!s->got_picture) {
 1681                 "Can not process SOS before SOF, skipping\n");
 1686         if (reference->width  != s->picture_ptr->width  ||
 1687             reference->height != s->picture_ptr->height ||
 1688             reference->format != s->picture_ptr->format) {
 1699                                       "decode_sos: nb_components (%d)",
 1703     if (len != 6 + 2 * nb_components) {
 1707     for (i = 0; i < nb_components; i++) {
 1712             if (id == s->component_id[index])
 1714         if (index == s->nb_components) {
 1716                    "decode_sos: index(%d) out of components\n", index);
 
 1720         if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
 1721             && nb_components == 3 && s->nb_components == 3 && i)
 1724         s->quant_sindex[i] = s->quant_index[index];
 1726         s->h_scount[i]  = s->h_count[index];
 1727         s->v_scount[i]  = s->v_count[index];
 1734         if (s->dc_index[i] <  0 || s->ac_index[i] < 0 ||
 1735             s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
 1737         if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
 
 1743     if(s->avctx->codec_tag != AV_RL32("CJPG")){
 1747         prev_shift = point_transform = 0;
 1749     if (nb_components > 1) {
 1751         s->mb_width  = (s->width  + s->h_max * block_size - 1) / (s->h_max * block_size);
 1752         s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
 1753     } else if (!s->ls) {
 1754         h = s->h_max / s->h_scount[0];
 1755         v = s->v_max / s->v_scount[0];
 1756         s->mb_width     = (s->width  + h * block_size - 1) / (h * block_size);
 1757         s->mb_height    = (s->height + v * block_size - 1) / (v * block_size);
 1758         s->nb_blocks[0] = 1;
 1765                s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
 1766                predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
 1767                s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
 
 1771     for (i = s->mjpb_skiptosod; i > 0; i--)
 1775     for (i = 0; i < nb_components; i++)
 1776         s->last_dc[i] = (4 << s->bits);
 1778     if (s->avctx->hwaccel) {
 1781                    s->raw_scan_buffer_size >= bytes_to_start);
 1784                          s->raw_scan_buffer      + bytes_to_start,
 1785                          s->raw_scan_buffer_size - bytes_to_start);
 1789     } else if (s->lossless) {
 1791         if (CONFIG_JPEGLS_DECODER && s->ls) {
 1796                                                 point_transform, ilv)) < 0)
 1799             if (s->rgb || s->bayer) {
 1805                                                  nb_components)) < 0)
 1814                                                         point_transform)) < 0)
 1818                                          prev_shift, point_transform,
 1819                                          mb_bitmask, mb_bitmask_size, reference)) < 0)
 1824     if (s->interlaced &&
 1833             s->bottom_field ^= 1;
 1851     s->restart_count    = 0;
 1853            s->restart_interval);
 
 1900         int t_w, t_h, v1, v2;
 1908         s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
 1909         s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
 1910         if (   s->avctx->sample_aspect_ratio.num <= 0
 1911             || s->avctx->sample_aspect_ratio.den <= 0) {
 1912             s->avctx->sample_aspect_ratio.num = 0;
 1913             s->avctx->sample_aspect_ratio.den = 1;
 1918                    "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
 1920                    s->avctx->sample_aspect_ratio.num,
 1921                    s->avctx->sample_aspect_ratio.den);
 1929                 if (len -10 - (t_w * t_h * 3) > 0)
 1930                     len -= t_w * t_h * 3;
 1947             av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
 1954         int pegasus_rct = s->pegasus_rct;
 1957                    "Pegasus lossless jpeg header found\n");
 1979             if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
 1985         s->pegasus_rct = pegasus_rct;
 
 2025         } else if (type == 1) {
 2037             if (!(flags & 0x04)) {
 2047         int ret, le, ifd_offset, bytes_read;
 2080     if ((s->start_code == APP1) && (len > (0x28 - 8))) {
 2103         unsigned nummarkers;
 2123         if (nummarkers == 0) {
 2126         } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
 2129         } else if (seqno > nummarkers) {
 2135         if (s->iccnum == 0) {
 2140             s->iccnum = nummarkers;
 2143         if (s->iccentries[seqno - 1].data) {
 2148         s->iccentries[seqno - 1].length = len;
 2150         if (!s->iccentries[seqno - 1].data) {
 2160         if (s->iccread > s->iccnum)
 2168                "mjpeg: error, decode_app parser read over the end\n");
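ICC profiles arrive split across APP2 markers: each chunk starts with the "ICC_PROFILE" tag and its terminating NUL, a 1-based sequence number and the total chunk count, and the decoder stores every payload in `s->iccentries` until all of them have been seen. A hedged sketch of that 14-byte chunk header (plain byte-level parse, not the decoder's own reader):

#include <stdint.h>
#include <string.h>

/* Illustrative APP2 ICC chunk header: 12-byte signature, then seqno and count. */
static int parse_icc_chunk_header(const uint8_t *p, int len,
                                  int *seqno, int *nummarkers)
{
    static const char sig[12] = "ICC_PROFILE";   /* 11 characters plus the NUL */
    if (len < 14 || memcmp(p, sig, 12) != 0)
        return -1;
    *seqno      = p[12];                         /* 1-based chunk index */
    *nummarkers = p[13];                         /* total number of chunks */
    if (*seqno < 1 || *nummarkers < 1 || *seqno > *nummarkers)
        return -1;
    return 14;                                   /* header bytes before the profile data */
}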
 
 2184         for (i = 0; i < len - 2; i++)
 2186         if (i > 0 && cbuf[i - 1] == '\n')
 2195         if (!strncmp(cbuf, "AVID", 4)) {
 2197         } else if (!strcmp(cbuf, "CS=ITU601"))
 2199         else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
 2200                  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
 2202         else if (!strcmp(cbuf, "MULTISCOPE II")) {
 2203             s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
 
 2215 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
 2217     const uint8_t *buf_ptr;
 2222     buf_ptr = *pbuf_ptr;
 2223     while (buf_end - buf_ptr > 1) {
 2226         if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
 2235     ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
 2236     *pbuf_ptr = buf_ptr;
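find_marker scans forward for a 0xFF byte followed by a marker code in the SOF0..COM range and reports how many garbage bytes it had to skip. A hedged standalone equivalent over a plain buffer:

#include <stdint.h>
#include <stddef.h>

/* Illustrative: return the next marker code at or after *pos, advancing *pos past
 * the 0xFF/code pair; returns -1 if the buffer ends first. 0xC0..0xFE is the
 * SOF0..COM range checked above. */
static int scan_for_marker(const uint8_t *buf, size_t size, size_t *pos, int *skipped)
{
    *skipped = 0;
    while (*pos + 1 < size) {
        uint8_t v = buf[(*pos)++], v2 = buf[*pos];
        if (v == 0xFF && v2 >= 0xC0 && v2 <= 0xFE) {
            (*pos)++;
            return v2;                 /* marker code without the leading 0xFF */
        }
        (*skipped)++;
    }
    return -1;
}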
 
 2241                          const uint8_t **buf_ptr, const uint8_t *buf_end,
 2242                          const uint8_t **unescaped_buf_ptr,
 2243                          int *unescaped_buf_size)
 2254         const uint8_t *src = *buf_ptr;
 2255         const uint8_t *ptr = src;
 2256         uint8_t *dst = s->buffer;
 2258         #define copy_data_segment(skip) do {       \
 2259             ptrdiff_t length = (ptr - src) - (skip);  \
 2261                 memcpy(dst, src, length);             \
 2271             while (ptr < buf_end) {
 2272                 uint8_t x = *(ptr++);
 2276                     while (ptr < buf_end && x == 0xff) {
 2291                     if (x < RST0 || x > RST7) {
 2301         #undef copy_data_segment
 2303         *unescaped_buf_ptr  = s->buffer;
 2304         *unescaped_buf_size = dst - s->buffer;
 2305         memset(s->buffer + *unescaped_buf_size, 0,
 2309                (buf_end - *buf_ptr) - (dst - s->buffer));
 2311         const uint8_t *src = *buf_ptr;
 2312         uint8_t *dst  = s->buffer;
 2318         while (src + t < buf_end) {
 2319             uint8_t x = src[t++];
 2321                 while ((src + t < buf_end) && x == 0xff)
 2334             uint8_t x = src[b++];
 2336             if (x == 0xFF && b < t) {
 2348         *unescaped_buf_ptr  = dst;
 2349         *unescaped_buf_size = (bit_count + 7) >> 3;
 2350         memset(s->buffer + *unescaped_buf_size, 0,
 2353         *unescaped_buf_ptr  = *buf_ptr;
 2354         *unescaped_buf_size = buf_end - *buf_ptr;
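The unescaping pass removes JPEG byte stuffing: inside entropy-coded data every real 0xFF is followed by a stuffed 0x00 (or by an RSTn marker that the scan decoder consumes itself), so the copy drops the 0x00 and stops at any other marker. A hedged standalone version of that filter, assuming dst has room for size bytes:

#include <stdint.h>
#include <stddef.h>

/* Illustrative JPEG un-stuffing: copy entropy-coded bytes, dropping the 0x00 that
 * follows a stuffed 0xFF, and stop when a real marker (other than RSTn) begins. */
static size_t unescape_scan_data(const uint8_t *src, size_t size, uint8_t *dst)
{
    size_t in = 0, out = 0;
    while (in < size) {
        uint8_t x = src[in++];
        if (x != 0xFF) {
            dst[out++] = x;
            continue;
        }
        if (in >= size)
            break;
        uint8_t next = src[in];
        if (next == 0x00) {                         /* stuffed byte: keep the 0xFF, skip the 0x00 */
            dst[out++] = 0xFF;
            in++;
        } else if (next >= 0xD0 && next <= 0xD7) {  /* RSTn stays in the unescaped stream */
            dst[out++] = 0xFF;
            dst[out++] = src[in++];
        } else {
            break;                                  /* any other marker ends the scan data */
        }
    }
    return out;                                     /* number of unescaped bytes produced */
}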
 
 2364     if (s->iccentries) {
 2365         for (i = 0; i < s->iccnum; i++)
 2375                                    int *got_frame, const AVPacket *avpkt,
 2376                                    const uint8_t *buf, const int buf_size)
 2379     const uint8_t *buf_end, *buf_ptr;
 2380     const uint8_t *unescaped_buf_ptr;
 2382     int unescaped_buf_size;
 2391     s->buf_size = buf_size;
 2395     s->adobe_transform = -1;
 2402     buf_end = buf + buf_size;
 2403     while (buf_ptr < buf_end) {
 2407                                           &unescaped_buf_size);
 2411         } else if (unescaped_buf_size > INT_MAX / 8) {
 2413                    "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
 2453         if (!CONFIG_JPEGLS_DECODER &&
 2477             s->restart_interval = 0;
 2478             s->restart_count    = 0;
 2479             s->raw_image_buffer      = buf_ptr;
 2480             s->raw_image_buffer_size = buf_end - buf_ptr;
 
 2511 #if FF_API_CODEC_PROPS
 2524 #if FF_API_CODEC_PROPS
 2536             if (!CONFIG_JPEGLS_DECODER ||
 2545                 s->progressive && s->cur_scan && s->got_picture)
 2548             if (!s->got_picture) {
 2550                        "Found EOI before any SOF, ignoring\n");
 2553             if (s->interlaced) {
 2554                 s->bottom_field ^= 1;
 2556                 if (s->bottom_field == !s->interlace_polarity)
 2561                 goto the_end_no_picture;
 2587             s->raw_scan_buffer      = buf_ptr;
 2588             s->raw_scan_buffer_size = buf_end - buf_ptr;
 2615                    "mjpeg: unsupported coding type (%x)\n", start_code);
 2623                "marker parser used %d bytes (%d bits)\n",
 2626     if (s->got_picture && s->cur_scan) {
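The frame loop above is essentially a marker dispatcher: find the next marker, unescape the segment, and hand DQT, DHT, SOFn, SOS, APPn, COM and EOI to their handlers until the packet ends or a picture is complete. A much-reduced, hedged sketch of that control flow; the handler typedef and names are placeholders, not FFmpeg API, and a real SOS handler would continue into the entropy-coded data rather than a length-prefixed skip:

#include <stdint.h>
#include <stddef.h>

enum { SOI = 0xD8, EOI = 0xD9, SOS = 0xDA, DQT = 0xDB, DHT = 0xC4, SOF0 = 0xC0 };

/* Placeholder handlers standing in for the decode_dqt/dht/sof/sos routines. */
typedef int (*segment_handler)(void *ctx, const uint8_t *seg, size_t len);

/* Illustrative top-level loop: walk markers until EOI or end of packet. */
static int decode_markers(void *ctx, const uint8_t *buf, size_t size,
                          segment_handler on_dqt, segment_handler on_dht,
                          segment_handler on_sof, segment_handler on_sos)
{
    size_t pos = 0;
    while (pos + 4 <= size) {
        if (buf[pos] != 0xFF) {
            pos++;                                  /* resync on garbage */
            continue;
        }
        uint8_t marker = buf[pos + 1];
        pos += 2;
        if (marker == SOI)
            continue;                               /* no payload */
        if (marker == EOI)
            return 1;                               /* picture complete */
        size_t len = ((size_t)buf[pos] << 8) | buf[pos + 1];  /* includes these two bytes */
        if (len < 2 || pos + len > size)
            return -1;
        const uint8_t *seg = buf + pos + 2;
        switch (marker) {
        case DQT:  if (on_dqt(ctx, seg, len - 2) < 0) return -1; break;
        case DHT:  if (on_dht(ctx, seg, len - 2) < 0) return -1; break;
        case SOF0: if (on_sof(ctx, seg, len - 2) < 0) return -1; break;
        case SOS:  if (on_sos(ctx, seg, len - 2) < 0) return -1; break;
        default:   break;                           /* APPn, COM, ... skipped here */
        }
        pos += len;
    }
    return 0;
}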
 
 2661         for (p = 0; p<s->nb_components; p++) {
 2662             uint8_t *line = s->picture_ptr->data[p];
 2665             if (!s->upscale_h[p])
 2671             if (s->upscale_v[p] == 1)
 2674             for (int i = 0; i < h; i++) {
 2675                 if (s->upscale_h[p] == 1) {
 2676                     if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
 2684                 } else if (s->upscale_h[p] == 2) {
 2686                         ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
 2688                             ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
 2697                 } else if (s->upscale_h[p] == 4){
 2699                         uint16_t *line16 = (uint16_t *) line;
 2700                         line16[w - 1] = line16[(w - 1) >> 2];
 2702                             line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
 2704                             line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
 2716                 line += s->linesize[p];
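The loop above stretches a plane horizontally in place, filling from the right edge inward so no sample is overwritten before it has been read (upscale_h of 1, 2 and 4 correspond to 2x, 3x and 4x widening). A hedged 8-bit sketch of the simplest case, in-place 2x duplication, assuming the row buffer already has space for the widened width:

#include <stdint.h>

/* Illustrative: in-place 2x horizontal upsample of one row by sample doubling,
 * written right-to-left like the w-1, w-2, ... pattern above. */
static void upsample_row_2x_inplace(uint8_t *line, int out_width)
{
    for (int x = out_width - 1; x >= 0; x--)
        line[x] = line[x / 2];
}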
 
 2741         for (p = 0; p < s->nb_components; p++) {
 2745             if (!s->upscale_v[p])
 2751             dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
 2753                 uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
 2754                 uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
 2761                 dst -= s->linesize[p];
 
 2765     if (s->flipped && !s->rgb) {
 2791         int w = s->picture_ptr->width;
 2792         int h = s->picture_ptr->height;
 2794         for (int i = 0; i < h; i++) {
 2799                              + s->picture_ptr->linesize[index]*i;
 2801             for (j=0; j<w; j++) {
 2803                 int r = dst[0][j] * k;
 2804                 int g = dst[1][j] * k;
 2805                 int b = dst[2][j] * k;
 2806                 dst[0][j] = g*257 >> 16;
 2807                 dst[1][j] = b*257 >> 16;
 2808                 dst[2][j] = r*257 >> 16;
 2810             memset(dst[3], 255, w);
 
 2814         int w = s->picture_ptr->width;
 2815         int h = s->picture_ptr->height;
 2817         for (int i = 0; i < h; i++) {
 2822                              + s->picture_ptr->linesize[index]*i;
 2824             for (j=0; j<w; j++) {
 2826                 int r = (255 - dst[0][j]) * k;
 2827                 int g = (128 - dst[1][j]) * k;
 2828                 int b = (128 - dst[2][j]) * k;
 2829                 dst[0][j] = r*257 >> 16;
 2830                 dst[1][j] = (g*257 >> 16) + 128;
 2831                 dst[2][j] = (b*257 >> 16) + 128;
 2833             memset(dst[3], 255, w);
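Both post-processing loops above rely on the same fixed-point trick: multiplying by 257 and shifting right by 16 approximates division by 255, so `value * k` (with k the 0..255 key or alpha sample) is rescaled back into 0..255 without a per-pixel division. A quick check of how close that approximation is:

#include <stdio.h>

/* Illustrative: (x * 257) >> 16 as a cheap stand-in for x / 255, the scaling used
 * by the CMYK/RGBA loops above. */
int main(void)
{
    int max_err = 0;
    for (int v = 0; v <= 255; v++) {
        for (int k = 0; k <= 255; k++) {
            int exact  = v * k / 255;
            int approx = (v * k * 257) >> 16;
            int err    = exact > approx ? exact - approx : approx - exact;
            if (err > max_err)
                max_err = err;
        }
    }
    printf("max deviation from v*k/255: %d\n", max_err);   /* at most 1 over this range */
    return 0;
}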
 
 2840             stereo->type  = s->stereo3d->type;
 2841             stereo->flags = s->stereo3d->flags;
 2846     if (s->iccnum != 0 && s->iccnum == s->iccread) {
 2852         for (int i = 0; i < s->iccnum; i++)
 2853             total_size += s->iccentries[i].length;
 2863             for (int i = 0; i < s->iccnum; i++) {
 2864                 memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
 2872         int orientation = strtol(value, &endptr, 0);
 2877             if (orientation >= 2 && orientation <= 8) {
 2888                 switch (orientation) {
 2935     return buf_ptr - buf;
 
 2953     if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
 2958     s->picture_ptr = NULL;
 2965     s->ljpeg_buffer_size = 0;
 2967     for (i = 0; i < 3; i++) {
 2968         for (j = 0; j < 4; j++)
 2990     s->smv_next_frame = 0;
 
 2994 #if CONFIG_MJPEG_DECODER
 2995 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
 2996 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
 2998     { "extern_huff", "Use external huffman table.",
 3003 static const AVClass mjpegdec_class = {
 3022     .p.priv_class   = &mjpegdec_class,
 3028 #if CONFIG_MJPEG_NVDEC_HWACCEL
 3031 #if CONFIG_MJPEG_VAAPI_HWACCEL
 3038 #if CONFIG_THP_DECODER
 3055 #if CONFIG_SMVJPEG_DECODER
 3070         s->smv_frame->pts += s->smv_frame->duration;
 3071     s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
 3073     if (s->smv_next_frame == 0)
 3084     if (s->smv_next_frame > 0)
 3094     s->smv_frame->pkt_dts = pkt->dts;
 3103     s->smv_frame->duration /= s->smv_frames_per_jpeg;
 3111     smv_process_frame(avctx, frame);
 3116     .p.name         = "smvjpeg",
 
  
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
 
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
 
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
 
#define FF_ENABLE_DEPRECATION_WARNINGS
 
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
 
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
 
#define AV_LOG_WARNING
Something somehow does not look correct.
 
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
 
AVPixelFormat
Pixel format.
 
#define AV_EF_EXPLODE
abort decoding on minor error detection
 
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
 
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
 
static int get_bits_left(GetBitContext *gb)
 
static int decode_slice(AVCodecContext *c, void *arg)
 
enum AVColorSpace colorspace
YUV colorspace type.
 
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
 
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
 
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
 
static void decode_flush(AVCodecContext *avctx)
 
static av_always_inline int bytestream2_tell(const GetByteContext *g)
 
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
 
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
 
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
 
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
 
const FFCodec ff_smvjpeg_decoder
 
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
 
#define se(name, range_min, range_max)
 
static int get_bits_count(const GetBitContext *s)
 
static void init_idct(AVCodecContext *avctx)
 
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
 
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
 
This structure describes decoded (raw) audio or video data.
 
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
 
#define AV_PIX_FMT_YUVA420P16
 
@ AVCOL_RANGE_JPEG
Full range content.
 
const FFCodec ff_mjpeg_decoder
 
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
 
enum AVFieldOrder field_order
Field order.
 
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
 
int step
Number of elements between 2 horizontally consecutive pixels.
 
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
 
const uint8_t ff_mjpeg_val_dc[]
 
#define FF_HW_SIMPLE_CALL(avctx, function)
 
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
 
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
 
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
 
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
 
#define UPDATE_CACHE(name, gb)
 
const uint8_t ff_mjpeg_bits_ac_chrominance[]
 
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
 
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
 
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
 
#define FF_DEBUG_PICT_INFO
 
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
 
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
 
#define GET_CACHE(name, gb)
 
static void skip_bits(GetBitContext *s, int n)
 
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
 
static av_cold void close(AVCodecParserContext *s)
 
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
 
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
 
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
 
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
 
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
 
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
 
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
 
AVCodec p
The public AVCodec.
 
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
 
const struct AVCodec * codec
 
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
 
enum AVDiscard skip_frame
Skip decoding for selected frames.
 
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
 
#define AV_PIX_FMT_YUVA444P16
 
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
 
static int mjpeg_decode_com(MJpegDecodeContext *s)
 
static int init_default_huffman_tables(MJpegDecodeContext *s)
 
int flags
AV_CODEC_FLAG_*.
 
static double val(void *priv, double ch)
 
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
 
#define AV_PIX_FMT_GRAY16
 
#define ss(width, name, subs,...)
 
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
 
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
 
const AVProfile ff_mjpeg_profiles[]
 
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
 
static int aligned(int val)
 
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
 
#define FF_ARRAY_ELEMS(a)
 
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
 
#define AV_PIX_FMT_YUV422P16
 
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
 
#define FF_CODEC_PROPERTY_LOSSLESS
 
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
 
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
 
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
 
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
 
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
 
#define CLOSE_READER(name, gb)
 
#define FF_CODEC_DECODE_CB(func)
 
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
 
av_cold void ff_blockdsp_init(BlockDSPContext *c)
 
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
 
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
 
#define AV_PIX_FMT_YUV444P16
 
#define AV_CEIL_RSHIFT(a, b)
 
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
 
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
 
#define av_assert0(cond)
assert() equivalent, that is always enabled.
 
static enum AVPixelFormat pix_fmts[]
 
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
 
#define AV_PIX_FMT_YUV420P16
 
static void reset_icc_profile(MJpegDecodeContext *s)
 
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
 
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
 
#define CODEC_LONG_NAME(str)
 
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
 
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
 
int flags
Additional information about the frame packing.
 
@ AVDISCARD_ALL
discard all
 
#define AV_PIX_FMT_GBRP16
 
#define AV_PIX_FMT_RGBA64
 
#define LIBAVUTIL_VERSION_INT
 
Describe the class of an AVClass context structure.
 
#define PTRDIFF_SPECIFIER
 
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
 
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
 
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
 
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
 
Rational number (pair of numerator and denominator).
 
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
 
struct AVCodecInternal * internal
Private context used for internal data.
 
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
 
const char * av_default_item_name(void *ptr)
Return the context name.
 
static unsigned int get_bits1(GetBitContext *s)
 
@ AV_PICTURE_TYPE_I
Intra.
 
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
 
#define LAST_SKIP_BITS(name, gb, num)
 
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
 
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
 
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
 
const uint8_t ff_mjpeg_val_ac_chrominance[]
 
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
 
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
 
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
 
#define copy_data_segment(skip)
 
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
 
const OptionDef options[]
 
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
 
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
 
int(* init)(AVBSFContext *ctx)
 
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
 
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
 
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
 
const uint8_t ff_mjpeg_val_ac_luminance[]
 
Snow codec notes referenced here: header fields (hcoeff half-pel interpolation filter coefficients, ref_frames, spatial_decomposition_type, qlog, mv_scale, qbias, block_max_depth, quant_table), the high-level bitstream-structure and decoding-process diagrams, and the binary range coder adapted from G. N. N. Martin, "Range encoding: an algorithm for removing redundancy from a digitised message". The coder encodes single bits with adaptive probabilities; after each bit the state is updated as: bit 0 -> new_state = 256 - state_transition_table[256 - old_state]; bit 1 -> new_state = state_transition_table[old_state]. The notes also cover neighboring-block selection, motion vector prediction (median of the scaled left, top and top-right vectors, plus (mvx_diff, mvy_diff) * mv_scale), and intra DC prediction.
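
A minimal C sketch of that per-bit state update; update_state is a hypothetical helper and state_transition_table stands for the 256-entry table quoted in the Snow notes:

#include <stdint.h>

/* Hypothetical helper mirroring the update rule quoted above:
 *   bit 0: new_state = 256 - state_transition_table[256 - old_state]
 *   bit 1: new_state = state_transition_table[old_state]
 * old_state is expected to be nonzero so both indices stay in 0..255. */
static int update_state(const uint8_t state_transition_table[256],
                        int old_state, int bit_seen)
{
    return bit_seen ? state_transition_table[old_state]
                    : 256 - state_transition_table[256 - old_state];
}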
 
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
 
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
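
As a usage illustration (a minimal sketch, not code from this decoder; make_extra_ref is a hypothetical name), av_frame_ref() is typically paired with av_frame_unref()/av_frame_free():

#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Create a second reference to src's buffers, then drop it again. */
static int make_extra_ref(const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    int ret;

    if (!dst)
        return AVERROR(ENOMEM);
    ret = av_frame_ref(dst, src);   /* new reference, no data copy */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }
    /* ... read from dst ... */
    av_frame_unref(dst);            /* drop the reference */
    av_frame_free(&dst);
    return 0;
}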
 
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
 
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
 
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
 
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
 
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
 
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
 
#define AV_PROFILE_MJPEG_JPEG_LS
 
const uint8_t ff_mjpeg_bits_ac_luminance[]
 
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
 
#define AV_NOPTS_VALUE
Undefined timestamp value.
 
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
 
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
 
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
 
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
 
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames,...
 
#define OPEN_READER(name, gb)
 
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
 
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
 
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this, just let it be. vf offset
 
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
 
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
 
#define HWACCEL_NVDEC(codec)
 
static void predictor(uint8_t *src, ptrdiff_t size)
 
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
 
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
 
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
 
#define AV_LOG_INFO
Standard information.
 
const FFCodec ff_thp_decoder
 
Filter. The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format. For audio that means channel layout.
 
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
 
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
 
#define i(width, name, range_min, range_max)
 
and forward the test: test the status of outputs and forward it to the corresponding inputs; return FFERROR_NOT_READY. If the filter stores internally one or a few frames for some input, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code:
 
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
 
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
 
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
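
A minimal sketch of show_bits() next to skip_bits(); it uses the internal libavcodec get_bits.h API and peek_marker is a hypothetical name:

#include "get_bits.h"

/* Peek at the next 16 bits without consuming them, then decide whether
 * to actually skip them. */
static int peek_marker(const uint8_t *buf, int size)
{
    GetBitContext gb;
    unsigned v;
    int ret = init_get_bits8(&gb, buf, size);

    if (ret < 0)
        return ret;
    v = show_bits(&gb, 16);   /* read position is unchanged */
    if (v == 0xFFD8)          /* e.g. a JPEG SOI marker */
        skip_bits(&gb, 16);   /* now consume the bits */
    return get_bits_count(&gb);
}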
 
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
 
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
 
static int mjpeg_decode_dri(MJpegDecodeContext *s)
 
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
 
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
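
A minimal sketch of the intended reuse pattern (ensure_buffer is a hypothetical helper; buf/buf_size would normally live in a decoder context):

#include <libavcodec/avcodec.h>   /* av_fast_padded_malloc(), AV_INPUT_BUFFER_PADDING_SIZE */

/* Grow the buffer only when needed; on success the padding bytes at the
 * end are zeroed. On failure the old buffer is freed and *buf_size is
 * reset to 0, leaving *buf NULL. */
static uint8_t *ensure_buffer(uint8_t **buf, unsigned int *buf_size, size_t needed)
{
    av_fast_padded_malloc(buf, buf_size, needed);
    return *buf;   /* NULL on allocation failure */
}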
 
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this, just let it be. vf default value
 
#define FF_DEBUG_STARTCODE
 
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
 
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
 
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
 
const char * name
Name of the codec implementation.
 
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
 
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
 
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
 
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
 
void * av_calloc(size_t nmemb, size_t size)
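
For contrast with av_mallocz() above, a minimal sketch (alloc_demo is a hypothetical name); both return zero-filled, suitably aligned memory, and av_calloc() additionally checks nmemb*size for overflow:

#include <stdint.h>
#include <libavutil/mem.h>

static void alloc_demo(size_t n)
{
    int16_t *coeffs = av_calloc(n, sizeof(*coeffs)); /* n zeroed elements */
    uint8_t *line   = av_mallocz(n);                 /* n zeroed bytes   */

    av_free(coeffs);
    av_free(line);
}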
 
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
 
const uint8_t ff_zigzag_direct[64]
 
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
 
int64_t frame_num
Frame counter, set by libavcodec.
 
void ff_vlc_free(VLC *vlc)
 
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
 
static const float pred[4]
 
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
 
#define FFSWAP(type, a, b)
 
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
 
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
 
enum AVStereo3DType type
How views are packed within the video.
 
static const uint8_t * align_get_bits(GetBitContext *s)
 
static const char * hwaccel
 
@ LSE
JPEG-LS extension parameters.
 
#define AV_INPUT_BUFFER_PADDING_SIZE
 
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
 
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
 
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this, just let it be. vf default minimum maximum flags: name is the option name, keep it simple and lowercase; description are in lowercase, without a period, and describe what they do, for example "set the foo of the bar"; offset is the offset of the field in your context, see the OFFSET() macro.
 
main external API structure.
 
#define FF_CODEC_RECEIVE_FRAME_CB(func)
 
#define SHOW_UBITS(name, gb, num)
 
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures. Several references can point to the same frame buffer.
 
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
 
#define FF_HW_CALL(avctx, function,...)
 
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
 
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return values:
 
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
 
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
 
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
 
const uint8_t ff_mjpeg_bits_dc_chrominance[]
 
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
 
#define FF_DISABLE_DEPRECATION_WARNINGS
 
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
 
int coded_width
Bitstream width / height, may be different from width/height e.g.
 
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
 
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
 
static int mjpeg_decode_app(MJpegDecodeContext *s)
 
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
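
A minimal sketch of attaching stereo 3D side data to a frame (tag_top_bottom is a hypothetical name):

#include <libavutil/error.h>
#include <libavutil/stereo3d.h>

static int tag_top_bottom(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);

    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type = AV_STEREO3D_TOPBOTTOM;   /* views stacked vertically */
    return 0;
}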
 
#define avpriv_request_sample(...)
 
Structure to hold side data for an AVFrame.
 
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
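
A minimal sketch of a PutBitContext round trip ending in flush_put_bits(); it uses the internal libavcodec put_bits.h API and write_fields is a hypothetical name:

#include "put_bits.h"

/* Write two small fields, then zero-pad the last partial byte. */
static int write_fields(uint8_t *buf, int buf_size)
{
    PutBitContext pb;

    init_put_bits(&pb, buf, buf_size);
    put_bits(&pb, 16, 0xFFD8);        /* a 16-bit value */
    put_bits(&pb, 4, 8);              /* a 4-bit value  */
    flush_put_bits(&pb);              /* pad to a byte boundary with zeros */
    return put_bits_count(&pb) / 8;   /* bytes written */
}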
 
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
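
A small illustration of that byte order using MKTAG (abcd_tag is a hypothetical name):

#include <libavutil/common.h>   /* MKTAG */

/* 'A' lands in the least significant byte, matching the LSB-first
 * packing described above. */
static unsigned abcd_tag(void)
{
    unsigned tag = MKTAG('A', 'B', 'C', 'D');
    /* tag == ('D' << 24) | ('C' << 16) | ('B' << 8) | 'A' */
    return tag;
}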
 
const FF_VISIBILITY_PUSH_HIDDEN uint8_t ff_mjpeg_bits_dc_luminance[]
 
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
 
This structure stores compressed data.
 
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
 
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
 
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
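
A minimal sketch pairing av_dict_set(), av_dict_copy() and av_dict_free() (dup_metadata is a hypothetical name):

#include <libavutil/dict.h>

static int dup_metadata(void)
{
    AVDictionary *src = NULL, *dst = NULL;
    int ret = av_dict_set(&src, "comment", "mjpeg sample", 0);

    if (ret >= 0)
        ret = av_dict_copy(&dst, src, 0);   /* copy all entries */
    av_dict_free(&src);
    av_dict_free(&dst);
    return ret;
}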
 
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
 
#define HWACCEL_VAAPI(codec)
 
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
 
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
 
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
 
static const SheerTable rgb[2]
 
The exact code depends on how similar the blocks are and how related they are to the block
 
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
 
#define MKTAG(a, b, c, d)
 
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
 
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
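
A minimal sketch of the typical pre-allocation check (check_dims is a hypothetical name):

#include <libavutil/imgutils.h>

/* Returns 0 for addressable dimensions, a negative AVERROR code otherwise. */
static int check_dims(void *log_ctx, unsigned int w, unsigned int h)
{
    /* 0: no log level offset; log_ctx may be NULL */
    return av_image_check_size(w, h, 0, log_ctx);
}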
 
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
 
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
 
#define PREDICT(ret, topleft, top, left, predictor)
 
static int return_frame(AVFilterContext *ctx, int is_second)
 
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
 
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
 
#define av_fourcc2str(fourcc)