00031 #include "avcodec.h"
00032 #include "get_bits.h"
00033 #include "put_bits.h"
00034 #include "dsputil.h"
00035 #include "thread.h"
00036
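/* Number of bits resolved per VLC table lookup; the joint tables built in
 * generate_joint_tables() hold up to 1<<VLC_BITS entries. */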
00037 #define VLC_BITS 11
00038
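/* Byte offsets of the blue, green, red and alpha components inside a packed
 * native-endian 32-bit RGB32 pixel. */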
00039 #if HAVE_BIGENDIAN
00040 #define B 3
00041 #define G 2
00042 #define R 1
00043 #define A 0
00044 #else
00045 #define B 0
00046 #define G 1
00047 #define R 2
00048 #define A 3
00049 #endif
00050
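/* Prediction methods, as stored in the low 6 bits of the "method" byte of the
 * version-2 extradata. */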
00051 typedef enum Predictor{
00052 LEFT= 0,
00053 PLANE,
00054 MEDIAN,
00055 } Predictor;
00056
00057 typedef struct HYuvContext{
00058 AVCodecContext *avctx;
00059 Predictor predictor;
00060 GetBitContext gb;
00061 PutBitContext pb;
00062 int interlaced;
00063 int decorrelate;
00064 int bitstream_bpp;
00065 int version;
00066 int yuy2;
00067 int bgr32;
00068 int width, height;
00069 int flags;
00070 int context;
00071 int picture_number;
00072 int last_slice_end;
00073 uint8_t *temp[3];
00074 uint64_t stats[3][256];
00075 uint8_t len[3][256];
00076 uint32_t bits[3][256];
00077 uint32_t pix_bgr_map[1<<VLC_BITS];
00078 VLC vlc[6];
00079 AVFrame picture;
00080 uint8_t *bitstream_buffer;
00081 unsigned int bitstream_buffer_size;
00082 DSPContext dsp;
00083 }HYuvContext;
00084
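/* Fixed tables for version-1 ("classic") Huffyuv streams, which carry no
 * Huffman tables in extradata; see read_old_huffman_tables(). */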
00085 static const unsigned char classic_shift_luma[] = {
00086 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
00087 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
00088 69,68, 0
00089 };
00090
00091 static const unsigned char classic_shift_chroma[] = {
00092 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
00093 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
00094 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
00095 };
00096
00097 static const unsigned char classic_add_luma[256] = {
00098 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
00099 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
00100 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
00101 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
00102 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
00103 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
00104 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
00105 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
00106 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
00107 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
00108 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
00109 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
00110 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
00111 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
00112 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
00113 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
00114 };
00115
00116 static const unsigned char classic_add_chroma[256] = {
00117 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
00118 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
00119 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
00120 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
00121 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
00122 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
00123 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
00124 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
00125 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
00126 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
00127 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
00128 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
00129 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
00130 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
00131 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
00132 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
00133 };
00134
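/* Left prediction for the encoder: store each byte minus its left neighbour
 * and return the last source byte as the new "left" value. The first bytes
 * are differenced in plain C before the bulk is handed to the diff_bytes()
 * DSP routine. */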
00135 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
00136 int i;
00137 if(w<32){
00138 for(i=0; i<w; i++){
00139 const int temp= src[i];
00140 dst[i]= temp - left;
00141 left= temp;
00142 }
00143 return left;
00144 }else{
00145 for(i=0; i<16; i++){
00146 const int temp= src[i];
00147 dst[i]= temp - left;
00148 left= temp;
00149 }
00150 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
00151 return src[w-1];
00152 }
00153 }
00154
00155 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){
00156 int i;
00157 int r,g,b,a;
00158 r= *red;
00159 g= *green;
00160 b= *blue;
00161 a= *alpha;
00162 for(i=0; i<FFMIN(w,4); i++){
00163 const int rt= src[i*4+R];
00164 const int gt= src[i*4+G];
00165 const int bt= src[i*4+B];
00166 const int at= src[i*4+A];
00167 dst[i*4+R]= rt - r;
00168 dst[i*4+G]= gt - g;
00169 dst[i*4+B]= bt - b;
00170 dst[i*4+A]= at - a;
00171 r = rt;
00172 g = gt;
00173 b = bt;
00174 a = at;
00175 }
00176 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
00177 *red= src[(w-1)*4+R];
00178 *green= src[(w-1)*4+G];
00179 *blue= src[(w-1)*4+B];
00180 *alpha= src[(w-1)*4+A];
00181 }
00182
00183 static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
00184 int i;
00185 int r,g,b;
00186 r= *red;
00187 g= *green;
00188 b= *blue;
00189 for(i=0; i<FFMIN(w,16); i++){
00190 const int rt= src[i*3+0];
00191 const int gt= src[i*3+1];
00192 const int bt= src[i*3+2];
00193 dst[i*3+0]= rt - r;
00194 dst[i*3+1]= gt - g;
00195 dst[i*3+2]= bt - b;
00196 r = rt;
00197 g = gt;
00198 b = bt;
00199 }
00200 s->dsp.diff_bytes(dst+48, src+48, src+48-3, w*3-48);
00201 *red= src[(w-1)*3+0];
00202 *green= src[(w-1)*3+1];
00203 *blue= src[(w-1)*3+2];
00204 }
00205
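/* Code lengths are run-length coded: a 3-bit repeat count and a 5-bit length,
 * where repeat==0 means an explicit 8-bit repeat count follows. */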
00206 static int read_len_table(uint8_t *dst, GetBitContext *gb){
00207 int i, val, repeat;
00208
00209 for(i=0; i<256;){
00210 repeat= get_bits(gb, 3);
00211 val = get_bits(gb, 5);
00212 if(repeat==0)
00213 repeat= get_bits(gb, 8);
00214
00215 if(i+repeat > 256) {
00216 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
00217 return -1;
00218 }
00219 while (repeat--)
00220 dst[i++] = val;
00221 }
00222 return 0;
00223 }
00224
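/* Assign canonical Huffman codes from the code lengths, longest codes first.
 * An odd code count at any length means the lengths do not form a valid
 * prefix code. */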
00225 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
00226 int len, index;
00227 uint32_t bits=0;
00228
00229 for(len=32; len>0; len--){
00230 for(index=0; index<256; index++){
00231 if(len_table[index]==len)
00232 dst[index]= bits++;
00233 }
00234 if(bits & 1){
00235 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
00236 return -1;
00237 }
00238 bits >>= 1;
00239 }
00240 return 0;
00241 }
00242
00243 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
00244 typedef struct {
00245 uint64_t val;
00246 int name;
00247 } HeapElem;
00248
00249 static void heap_sift(HeapElem *h, int root, int size)
00250 {
00251 while(root*2+1 < size) {
00252 int child = root*2+1;
00253 if(child < size-1 && h[child].val > h[child+1].val)
00254 child++;
00255 if(h[root].val > h[child].val) {
00256 FFSWAP(HeapElem, h[root], h[child]);
00257 root = child;
00258 } else
00259 break;
00260 }
00261 }
00262
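/* Build a length-limited Huffman code: heap-based Huffman construction is
 * retried with a progressively larger per-symbol offset (which flattens the
 * statistics) until no code is 32 bits or longer. */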
00263 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
00264 HeapElem h[256];
00265 int up[2*256];
00266 int len[2*256];
00267 int offset, i, next;
00268 int size = 256;
00269
00270 for(offset=1; ; offset<<=1){
00271 for(i=0; i<size; i++){
00272 h[i].name = i;
00273 h[i].val = (stats[i] << 8) + offset;
00274 }
00275 for(i=size/2-1; i>=0; i--)
00276 heap_sift(h, i, size);
00277
00278 for(next=size; next<size*2-1; next++){
00279
00280 uint64_t min1v = h[0].val;
00281 up[h[0].name] = next;
00282 h[0].val = INT64_MAX;
00283 heap_sift(h, 0, size);
00284 up[h[0].name] = next;
00285 h[0].name = next;
00286 h[0].val += min1v;
00287 heap_sift(h, 0, size);
00288 }
00289
00290 len[2*size-2] = 0;
00291 for(i=2*size-3; i>=size; i--)
00292 len[i] = len[up[i]] + 1;
00293 for(i=0; i<size; i++) {
00294 dst[i] = len[up[i]] + 1;
00295 if(dst[i] >= 32) break;
00296 }
00297 if(i==size) break;
00298 }
00299 }
00300 #endif
00301
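/* Build joint VLC tables so that a luma/chroma pair (or a whole RGB triple)
 * whose combined code fits in VLC_BITS can be decoded with a single lookup. */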
00302 static void generate_joint_tables(HYuvContext *s){
00303 uint16_t symbols[1<<VLC_BITS];
00304 uint16_t bits[1<<VLC_BITS];
00305 uint8_t len[1<<VLC_BITS];
00306 if(s->bitstream_bpp < 24){
00307 int p, i, y, u;
00308 for(p=0; p<3; p++){
00309 for(i=y=0; y<256; y++){
00310 int len0 = s->len[0][y];
00311 int limit = VLC_BITS - len0;
00312 if(limit <= 0)
00313 continue;
00314 for(u=0; u<256; u++){
00315 int len1 = s->len[p][u];
00316 if(len1 > limit)
00317 continue;
00318 len[i] = len0 + len1;
00319 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
00320 symbols[i] = (y<<8) + u;
00321 if(symbols[i] != 0xffff)
00322 i++;
00323 }
00324 }
00325 free_vlc(&s->vlc[3+p]);
00326 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
00327 }
00328 }else{
00329 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
00330 int i, b, g, r, code;
00331 int p0 = s->decorrelate;
00332 int p1 = !s->decorrelate;
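/* Only residuals in the range -16..15 per component go into the joint table;
 * that covers nearly every combination whose total length fits in VLC_BITS,
 * and anything missing simply falls back to per-component decoding in
 * decode_bgr_1(). */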
00333
00334
00335
00336 for(i=0, g=-16; g<16; g++){
00337 int len0 = s->len[p0][g&255];
00338 int limit0 = VLC_BITS - len0;
00339 if(limit0 < 2)
00340 continue;
00341 for(b=-16; b<16; b++){
00342 int len1 = s->len[p1][b&255];
00343 int limit1 = limit0 - len1;
00344 if(limit1 < 1)
00345 continue;
00346 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
00347 for(r=-16; r<16; r++){
00348 int len2 = s->len[2][r&255];
00349 if(len2 > limit1)
00350 continue;
00351 len[i] = len0 + len1 + len2;
00352 bits[i] = (code << len2) + s->bits[2][r&255];
00353 if(s->decorrelate){
00354 map[i][G] = g;
00355 map[i][B] = g+b;
00356 map[i][R] = g+r;
00357 }else{
00358 map[i][B] = g;
00359 map[i][G] = b;
00360 map[i][R] = r;
00361 }
00362 i++;
00363 }
00364 }
00365 }
00366 free_vlc(&s->vlc[3]);
00367 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
00368 }
00369 }
00370
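/* Parse the three per-plane length tables, regenerate the code bits and
 * rebuild all VLCs; returns the number of bytes consumed. */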
00371 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
00372 GetBitContext gb;
00373 int i;
00374
00375 init_get_bits(&gb, src, length*8);
00376
00377 for(i=0; i<3; i++){
00378 if(read_len_table(s->len[i], &gb)<0)
00379 return -1;
00380 if(generate_bits_table(s->bits[i], s->len[i])<0){
00381 return -1;
00382 }
00383 free_vlc(&s->vlc[i]);
00384 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
00385 }
00386
00387 generate_joint_tables(s);
00388
00389 return (get_bits_count(&gb)+7)/8;
00390 }
00391
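/* Version-1 streams carry no tables, so the fixed "classic" tables above are
 * used instead. */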
00392 static int read_old_huffman_tables(HYuvContext *s){
00393 #if 1
00394 GetBitContext gb;
00395 int i;
00396
00397 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
00398 if(read_len_table(s->len[0], &gb)<0)
00399 return -1;
00400 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
00401 if(read_len_table(s->len[1], &gb)<0)
00402 return -1;
00403
00404 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
00405 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
00406
00407 if(s->bitstream_bpp >= 24){
00408 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
00409 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
00410 }
00411 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
00412 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
00413
00414 for(i=0; i<3; i++){
00415 free_vlc(&s->vlc[i]);
00416 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
00417 }
00418
00419 generate_joint_tables(s);
00420
00421 return 0;
00422 #else
00423 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
00424 return -1;
00425 #endif
00426 }
00427
00428 static av_cold void alloc_temp(HYuvContext *s){
00429 int i;
00430
00431 if(s->bitstream_bpp<24){
00432 for(i=0; i<3; i++){
00433 s->temp[i]= av_malloc(s->width + 16);
00434 }
00435 }else{
00436 s->temp[0]= av_mallocz(4*s->width + 16);
00437 }
00438 }
00439
00440 static av_cold int common_init(AVCodecContext *avctx){
00441 HYuvContext *s = avctx->priv_data;
00442
00443 s->avctx= avctx;
00444 s->flags= avctx->flags;
00445
00446 dsputil_init(&s->dsp, avctx);
00447
00448 s->width= avctx->width;
00449 s->height= avctx->height;
00450 assert(s->width>0 && s->height>0);
00451
00452 return 0;
00453 }
00454
00455 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
00456 static av_cold int decode_init(AVCodecContext *avctx)
00457 {
00458 HYuvContext *s = avctx->priv_data;
00459
00460 common_init(avctx);
00461 memset(s->vlc, 0, 3*sizeof(VLC));
00462
00463 avctx->coded_frame= &s->picture;
00464 avcodec_get_frame_defaults(&s->picture);
00465 s->interlaced= s->height > 288;
00466
00467 s->bgr32=1;
00468
00469
00470 if(avctx->extradata_size){
00471 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
00472 s->version=1;
00473 else
00474 s->version=2;
00475 }else
00476 s->version=0;
00477
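/* Version-2 extradata layout: byte 0 = predictor | (decorrelate<<6),
 * byte 1 = bitstream bits per pixel, byte 2 = interlace (0x30) and
 * context-model (0x40) flags, byte 3 = reserved, followed by the stored
 * Huffman length tables. */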
00478 if(s->version==2){
00479 int method, interlace;
00480
00481 if (avctx->extradata_size < 4)
00482 return -1;
00483
00484 method= ((uint8_t*)avctx->extradata)[0];
00485 s->decorrelate= method&64 ? 1 : 0;
00486 s->predictor= method&63;
00487 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
00488 if(s->bitstream_bpp==0)
00489 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
00490 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
00491 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
00492 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
00493
00494 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
00495 return -1;
00496 }else{
00497 switch(avctx->bits_per_coded_sample&7){
00498 case 1:
00499 s->predictor= LEFT;
00500 s->decorrelate= 0;
00501 break;
00502 case 2:
00503 s->predictor= LEFT;
00504 s->decorrelate= 1;
00505 break;
00506 case 3:
00507 s->predictor= PLANE;
00508 s->decorrelate= avctx->bits_per_coded_sample >= 24;
00509 break;
00510 case 4:
00511 s->predictor= MEDIAN;
00512 s->decorrelate= 0;
00513 break;
00514 default:
00515 s->predictor= LEFT;
00516 s->decorrelate= 0;
00517 break;
00518 }
00519 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
00520 s->context= 0;
00521
00522 if(read_old_huffman_tables(s) < 0)
00523 return -1;
00524 }
00525
00526 switch(s->bitstream_bpp){
00527 case 12:
00528 avctx->pix_fmt = PIX_FMT_YUV420P;
00529 break;
00530 case 16:
00531 if(s->yuy2){
00532 avctx->pix_fmt = PIX_FMT_YUYV422;
00533 }else{
00534 avctx->pix_fmt = PIX_FMT_YUV422P;
00535 }
00536 break;
00537 case 24:
00538 case 32:
00539 if(s->bgr32){
00540 avctx->pix_fmt = PIX_FMT_RGB32;
00541 }else{
00542 avctx->pix_fmt = PIX_FMT_BGR24;
00543 }
00544 break;
00545 default:
00546 assert(0);
00547 }
00548
00549 alloc_temp(s);
00550
00551
00552
00553 return 0;
00554 }
00555
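/* Frame-threading copy setup: each decoding thread rebuilds its own VLCs and
 * temp buffers; the remaining fields are copied from the main context. */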
00556 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
00557 {
00558 HYuvContext *s = avctx->priv_data;
00559 int i;
00560
00561 avctx->coded_frame= &s->picture;
00562 alloc_temp(s);
00563
00564 for (i = 0; i < 6; i++)
00565 s->vlc[i].table = NULL;
00566
00567 if(s->version==2){
00568 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
00569 return -1;
00570 }else{
00571 if(read_old_huffman_tables(s) < 0)
00572 return -1;
00573 }
00574
00575 return 0;
00576 }
00577 #endif
00578
00579 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
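/* Run-length encode one 256-entry length table into the format parsed by
 * read_len_table(); returns the number of bytes written. */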
00580 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
00581 int i;
00582 int index= 0;
00583
00584 for(i=0; i<256;){
00585 int val= len[i];
00586 int repeat=0;
00587
00588 for(; i<256 && len[i]==val && repeat<255; i++)
00589 repeat++;
00590
00591 assert(val < 32 && val >0 && repeat<256 && repeat>0);
00592 if(repeat>7){
00593 buf[index++]= val;
00594 buf[index++]= repeat;
00595 }else{
00596 buf[index++]= val | (repeat<<5);
00597 }
00598 }
00599
00600 return index;
00601 }
00602
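/* Pick bitstream parameters from the pixel format and flags, write the 4-byte
 * header plus the three stored length tables into extradata, and seed the
 * Huffman tables either from two-pass stats (stats_in) or from a generic
 * distribution. */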
00603 static av_cold int encode_init(AVCodecContext *avctx)
00604 {
00605 HYuvContext *s = avctx->priv_data;
00606 int i, j;
00607
00608 common_init(avctx);
00609
00610 avctx->extradata= av_mallocz(1024*30);
00611 avctx->stats_out= av_mallocz(1024*30);
00612 s->version=2;
00613
00614 avctx->coded_frame= &s->picture;
00615
00616 switch(avctx->pix_fmt){
00617 case PIX_FMT_YUV420P:
00618 s->bitstream_bpp= 12;
00619 break;
00620 case PIX_FMT_YUV422P:
00621 s->bitstream_bpp= 16;
00622 break;
00623 case PIX_FMT_RGB32:
00624 s->bitstream_bpp= 32;
00625 break;
00626 case PIX_FMT_RGB24:
00627 s->bitstream_bpp= 24;
00628 break;
00629 default:
00630 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
00631 return -1;
00632 }
00633 avctx->bits_per_coded_sample= s->bitstream_bpp;
00634 s->decorrelate= s->bitstream_bpp >= 24;
00635 s->predictor= avctx->prediction_method;
00636 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
00637 if(avctx->context_model==1){
00638 s->context= avctx->context_model;
00639 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
00640 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
00641 return -1;
00642 }
00643 }else s->context= 0;
00644
00645 if(avctx->codec->id==CODEC_ID_HUFFYUV){
00646 if(avctx->pix_fmt==PIX_FMT_YUV420P){
00647 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
00648 return -1;
00649 }
00650 if(avctx->context_model){
00651 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
00652 return -1;
00653 }
00654 if(s->interlaced != ( s->height > 288 ))
00655 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
00656 }
00657
00658 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
00659 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
00660 return -1;
00661 }
00662
00663 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
00664 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
00665 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
00666 if(s->context)
00667 ((uint8_t*)avctx->extradata)[2]|= 0x40;
00668 ((uint8_t*)avctx->extradata)[3]= 0;
00669 s->avctx->extradata_size= 4;
00670
00671 if(avctx->stats_in){
00672 char *p= avctx->stats_in;
00673
00674 for(i=0; i<3; i++)
00675 for(j=0; j<256; j++)
00676 s->stats[i][j]= 1;
00677
00678 for(;;){
00679 for(i=0; i<3; i++){
00680 char *next;
00681
00682 for(j=0; j<256; j++){
00683 s->stats[i][j]+= strtol(p, &next, 0);
00684 if(next==p) return -1;
00685 p=next;
00686 }
00687 }
00688 if(p[0]==0 || p[1]==0 || p[2]==0) break;
00689 }
00690 }else{
00691 for(i=0; i<3; i++)
00692 for(j=0; j<256; j++){
00693 int d= FFMIN(j, 256-j);
00694
00695 s->stats[i][j]= 100000000/(d+1);
00696 }
00697 }
00698
00699 for(i=0; i<3; i++){
00700 generate_len_table(s->len[i], s->stats[i]);
00701
00702 if(generate_bits_table(s->bits[i], s->len[i])<0){
00703 return -1;
00704 }
00705
00706 s->avctx->extradata_size+=
00707 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
00708 }
00709
00710 if(s->context){
00711 for(i=0; i<3; i++){
00712 int pels = s->width*s->height / (i?40:10);
00713 for(j=0; j<256; j++){
00714 int d= FFMIN(j, 256-j);
00715 s->stats[i][j]= pels/(d+1);
00716 }
00717 }
00718 }else{
00719 for(i=0; i<3; i++)
00720 for(j=0; j<256; j++)
00721 s->stats[i][j]= 0;
00722 }
00723
00724
00725
00726 alloc_temp(s);
00727
00728 s->picture_number=0;
00729
00730 return 0;
00731 }
00732 #endif
00733
00734
00735
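/* Decode two symbols with one joint-table lookup; the reserved value 0xffff
 * marks pairs that are not in the joint table, in which case both symbols are
 * read from their individual tables. */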
00736 #define READ_2PIX(dst0, dst1, plane1){\
00737 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
00738 if(code != 0xffff){\
00739 dst0 = code>>8;\
00740 dst1 = code;\
00741 }else{\
00742 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
00743 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
00744 }\
00745 }
00746
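/* The bounds-checked loop is taken when the remaining input might be too
 * short for the worst case, so a truncated stream cannot read past the end of
 * the bit reader. */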
00747 static void decode_422_bitstream(HYuvContext *s, int count){
00748 int i;
00749
00750 count/=2;
00751
00752 if(count >= (get_bits_left(&s->gb))/(31*4)){
00753 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
00754 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
00755 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
00756 }
00757 }else{
00758 for(i=0; i<count; i++){
00759 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
00760 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
00761 }
00762 }
00763 }
00764
00765 static void decode_gray_bitstream(HYuvContext *s, int count){
00766 int i;
00767
00768 count/=2;
00769
00770 if(count >= (get_bits_left(&s->gb))/(31*2)){
00771 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
00772 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
00773 }
00774 }else{
00775 for(i=0; i<count; i++){
00776 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
00777 }
00778 }
00779 }
00780
00781 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
00782 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
00783 int i;
00784 const uint8_t *y = s->temp[0] + offset;
00785 const uint8_t *u = s->temp[1] + offset/2;
00786 const uint8_t *v = s->temp[2] + offset/2;
00787
00788 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
00789 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
00790 return -1;
00791 }
00792
00793 #define LOAD4\
00794 int y0 = y[2*i];\
00795 int y1 = y[2*i+1];\
00796 int u0 = u[i];\
00797 int v0 = v[i];
00798
00799 count/=2;
00800 if(s->flags&CODEC_FLAG_PASS1){
00801 for(i=0; i<count; i++){
00802 LOAD4;
00803 s->stats[0][y0]++;
00804 s->stats[1][u0]++;
00805 s->stats[0][y1]++;
00806 s->stats[2][v0]++;
00807 }
00808 }
00809 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
00810 return 0;
00811 if(s->context){
00812 for(i=0; i<count; i++){
00813 LOAD4;
00814 s->stats[0][y0]++;
00815 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
00816 s->stats[1][u0]++;
00817 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
00818 s->stats[0][y1]++;
00819 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
00820 s->stats[2][v0]++;
00821 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
00822 }
00823 }else{
00824 for(i=0; i<count; i++){
00825 LOAD4;
00826 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
00827 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
00828 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
00829 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
00830 }
00831 }
00832 return 0;
00833 }
00834
00835 static int encode_gray_bitstream(HYuvContext *s, int count){
00836 int i;
00837
00838 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
00839 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
00840 return -1;
00841 }
00842
00843 #define LOAD2\
00844 int y0 = s->temp[0][2*i];\
00845 int y1 = s->temp[0][2*i+1];
00846 #define STAT2\
00847 s->stats[0][y0]++;\
00848 s->stats[0][y1]++;
00849 #define WRITE2\
00850 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
00851 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
00852
00853 count/=2;
00854 if(s->flags&CODEC_FLAG_PASS1){
00855 for(i=0; i<count; i++){
00856 LOAD2;
00857 STAT2;
00858 }
00859 }
00860 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
00861 return 0;
00862
00863 if(s->context){
00864 for(i=0; i<count; i++){
00865 LOAD2;
00866 STAT2;
00867 WRITE2;
00868 }
00869 }else{
00870 for(i=0; i<count; i++){
00871 LOAD2;
00872 WRITE2;
00873 }
00874 }
00875 return 0;
00876 }
00877 #endif
00878
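/* Decode packed RGB(A) pixels: try the joint 3-component VLC first, fall back
 * to per-component VLCs, and undo the green decorrelation when enabled.
 * Alpha shares plane 2's table. */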
00879 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
00880 int i;
00881 for(i=0; i<count; i++){
00882 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
00883 if(code != -1){
00884 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
00885 }else if(decorrelate){
00886 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
00887 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
00888 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
00889 }else{
00890 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
00891 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
00892 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
00893 }
00894 if(alpha)
00895 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
00896 }
00897 }
00898
00899 static void decode_bgr_bitstream(HYuvContext *s, int count){
00900 if(s->decorrelate){
00901 if(s->bitstream_bpp==24)
00902 decode_bgr_1(s, count, 1, 0);
00903 else
00904 decode_bgr_1(s, count, 1, 1);
00905 }else{
00906 if(s->bitstream_bpp==24)
00907 decode_bgr_1(s, count, 0, 0);
00908 else
00909 decode_bgr_1(s, count, 0, 1);
00910 }
00911 }
00912
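/* Encode packed RGB(A): green is coded as-is, red and blue as differences
 * from green (matching decode_bgr_1()), and alpha reuses plane 2's table. */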
00913 static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes){
00914 int i;
00915
00916 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*planes*count){
00917 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
00918 return -1;
00919 }
00920
00921 #define LOAD3\
00922 int g= s->temp[0][planes==3 ? 3*i+1 : 4*i+G];\
00923 int b= (s->temp[0][planes==3 ? 3*i+2 : 4*i+B] - g) & 0xff;\
00924 int r= (s->temp[0][planes==3 ? 3*i+0 : 4*i+R] - g) & 0xff;\
00925 int a= s->temp[0][planes*i+A];
00926 #define STAT3\
00927 s->stats[0][b]++;\
00928 s->stats[1][g]++;\
00929 s->stats[2][r]++;\
00930 if(planes==4) s->stats[2][a]++;
00931 #define WRITE3\
00932 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
00933 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
00934 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
00935 if(planes==4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
00936
00937 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
00938 for(i=0; i<count; i++){
00939 LOAD3;
00940 STAT3;
00941 }
00942 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
00943 for(i=0; i<count; i++){
00944 LOAD3;
00945 STAT3;
00946 WRITE3;
00947 }
00948 }else{
00949 for(i=0; i<count; i++){
00950 LOAD3;
00951 WRITE3;
00952 }
00953 }
00954 return 0;
00955 }
00956
00957 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
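/* Hand the rows decoded since the last call to the user's draw_horiz_band
 * callback, if one is set. */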
00958 static void draw_slice(HYuvContext *s, int y){
00959 int h, cy, i;
00960 int offset[AV_NUM_DATA_POINTERS];
00961
00962 if(s->avctx->draw_horiz_band==NULL)
00963 return;
00964
00965 h= y - s->last_slice_end;
00966 y -= h;
00967
00968 if(s->bitstream_bpp==12){
00969 cy= y>>1;
00970 }else{
00971 cy= y;
00972 }
00973
00974 offset[0] = s->picture.linesize[0]*y;
00975 offset[1] = s->picture.linesize[1]*cy;
00976 offset[2] = s->picture.linesize[2]*cy;
00977 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
00978 offset[i] = 0;
00979 emms_c();
00980
00981 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
00982
00983 s->last_slice_end= y + h;
00984 }
00985
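/* Decode one frame: byte-swap the input into a padded buffer, read per-frame
 * Huffman tables when context modelling is on, then undo the prediction row
 * by row according to the predictor and pixel format. */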
00986 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
00987 const uint8_t *buf = avpkt->data;
00988 int buf_size = avpkt->size;
00989 HYuvContext *s = avctx->priv_data;
00990 const int width= s->width;
00991 const int width2= s->width>>1;
00992 const int height= s->height;
00993 int fake_ystride, fake_ustride, fake_vstride;
00994 AVFrame * const p= &s->picture;
00995 int table_size= 0;
00996
00997 AVFrame *picture = data;
00998
00999 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
01000 if (!s->bitstream_buffer)
01001 return AVERROR(ENOMEM);
01002
01003 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
01004 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
01005
01006 if(p->data[0])
01007 ff_thread_release_buffer(avctx, p);
01008
01009 p->reference= 0;
01010 if(ff_thread_get_buffer(avctx, p) < 0){
01011 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
01012 return -1;
01013 }
01014
01015 if(s->context){
01016 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
01017 if(table_size < 0)
01018 return -1;
01019 }
01020
01021 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
01022 return -1;
01023
01024 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
01025
01026 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
01027 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
01028 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
01029
01030 s->last_slice_end= 0;
01031
01032 if(s->bitstream_bpp<24){
01033 int y, cy;
01034 int lefty, leftu, leftv;
01035 int lefttopy, lefttopu, lefttopv;
01036
01037 if(s->yuy2){
01038 p->data[0][3]= get_bits(&s->gb, 8);
01039 p->data[0][2]= get_bits(&s->gb, 8);
01040 p->data[0][1]= get_bits(&s->gb, 8);
01041 p->data[0][0]= get_bits(&s->gb, 8);
01042
01043 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
01044 return -1;
01045 }else{
01046
01047 leftv= p->data[2][0]= get_bits(&s->gb, 8);
01048 lefty= p->data[0][1]= get_bits(&s->gb, 8);
01049 leftu= p->data[1][0]= get_bits(&s->gb, 8);
01050 p->data[0][0]= get_bits(&s->gb, 8);
01051
01052 switch(s->predictor){
01053 case LEFT:
01054 case PLANE:
01055 decode_422_bitstream(s, width-2);
01056 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
01057 if(!(s->flags&CODEC_FLAG_GRAY)){
01058 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
01059 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
01060 }
01061
01062 for(cy=y=1; y<s->height; y++,cy++){
01063 uint8_t *ydst, *udst, *vdst;
01064
01065 if(s->bitstream_bpp==12){
01066 decode_gray_bitstream(s, width);
01067
01068 ydst= p->data[0] + p->linesize[0]*y;
01069
01070 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
01071 if(s->predictor == PLANE){
01072 if(y>s->interlaced)
01073 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
01074 }
01075 y++;
01076 if(y>=s->height) break;
01077 }
01078
01079 draw_slice(s, y);
01080
01081 ydst= p->data[0] + p->linesize[0]*y;
01082 udst= p->data[1] + p->linesize[1]*cy;
01083 vdst= p->data[2] + p->linesize[2]*cy;
01084
01085 decode_422_bitstream(s, width);
01086 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
01087 if(!(s->flags&CODEC_FLAG_GRAY)){
01088 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
01089 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
01090 }
01091 if(s->predictor == PLANE){
01092 if(cy>s->interlaced){
01093 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
01094 if(!(s->flags&CODEC_FLAG_GRAY)){
01095 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
01096 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
01097 }
01098 }
01099 }
01100 }
01101 draw_slice(s, height);
01102
01103 break;
01104 case MEDIAN:
01105
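/* first line except the first 2 pixels is left predicted */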
01106 decode_422_bitstream(s, width-2);
01107 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
01108 if(!(s->flags&CODEC_FLAG_GRAY)){
01109 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
01110 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
01111 }
01112
01113 cy=y=1;
01114
01115
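/* second line is left predicted for the interlaced case */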
01116 if(s->interlaced){
01117 decode_422_bitstream(s, width);
01118 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
01119 if(!(s->flags&CODEC_FLAG_GRAY)){
01120 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[1], s->temp[1], width2, leftu);
01121 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[2], s->temp[2], width2, leftv);
01122 }
01123 y++; cy++;
01124 }
01125
01126
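/* next 4 pixels are left predicted too */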
01127 decode_422_bitstream(s, 4);
01128 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
01129 if(!(s->flags&CODEC_FLAG_GRAY)){
01130 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
01131 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
01132 }
01133
01134
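/* next line except the first 4 pixels is median predicted */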
01135 lefttopy= p->data[0][3];
01136 decode_422_bitstream(s, width-4);
01137 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
01138 if(!(s->flags&CODEC_FLAG_GRAY)){
01139 lefttopu= p->data[1][1];
01140 lefttopv= p->data[2][1];
01141 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
01142 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
01143 }
01144 y++; cy++;
01145
01146 for(; y<height; y++,cy++){
01147 uint8_t *ydst, *udst, *vdst;
01148
01149 if(s->bitstream_bpp==12){
01150 while(2*cy > y){
01151 decode_gray_bitstream(s, width);
01152 ydst= p->data[0] + p->linesize[0]*y;
01153 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
01154 y++;
01155 }
01156 if(y>=height) break;
01157 }
01158 draw_slice(s, y);
01159
01160 decode_422_bitstream(s, width);
01161
01162 ydst= p->data[0] + p->linesize[0]*y;
01163 udst= p->data[1] + p->linesize[1]*cy;
01164 vdst= p->data[2] + p->linesize[2]*cy;
01165
01166 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
01167 if(!(s->flags&CODEC_FLAG_GRAY)){
01168 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
01169 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
01170 }
01171 }
01172
01173 draw_slice(s, height);
01174 break;
01175 }
01176 }
01177 }else{
01178 int y;
01179 int leftr, leftg, leftb, lefta;
01180 const int last_line= (height-1)*p->linesize[0];
01181
01182 if(s->bitstream_bpp==32){
01183 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
01184 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
01185 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
01186 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
01187 }else{
01188 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
01189 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
01190 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
01191 lefta= p->data[0][last_line+A]= 255;
01192 skip_bits(&s->gb, 8);
01193 }
01194
01195 if(s->bgr32){
01196 switch(s->predictor){
01197 case LEFT:
01198 case PLANE:
01199 decode_bgr_bitstream(s, width-1);
01200 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
01201
01202 for(y=s->height-2; y>=0; y--){
01203 decode_bgr_bitstream(s, width);
01204
01205 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
01206 if(s->predictor == PLANE){
01207 if(s->bitstream_bpp!=32) lefta=0;
01208 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
01209 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
01210 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
01211 }
01212 }
01213 }
01214 draw_slice(s, height);
01215 break;
01216 default:
01217 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
01218 }
01219 }else{
01220
01221 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
01222 return -1;
01223 }
01224 }
01225 emms_c();
01226
01227 *picture= *p;
01228 *data_size = sizeof(AVFrame);
01229
01230 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
01231 }
01232 #endif
01233
01234 static int common_end(HYuvContext *s){
01235 int i;
01236
01237 for(i=0; i<3; i++){
01238 av_freep(&s->temp[i]);
01239 }
01240 return 0;
01241 }
01242
01243 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
01244 static av_cold int decode_end(AVCodecContext *avctx)
01245 {
01246 HYuvContext *s = avctx->priv_data;
01247 int i;
01248
01249 if (s->picture.data[0])
01250 avctx->release_buffer(avctx, &s->picture);
01251
01252 common_end(s);
01253 av_freep(&s->bitstream_buffer);
01254
01255 for(i=0; i<6; i++){
01256 free_vlc(&s->vlc[i]);
01257 }
01258
01259 return 0;
01260 }
01261 #endif
01262
01263 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
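/* Encode one frame; in context mode the adapted tables are written at the
 * start of the frame and the gathered statistics are halved afterwards. */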
01264 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
01265 HYuvContext *s = avctx->priv_data;
01266 AVFrame *pict = data;
01267 const int width= s->width;
01268 const int width2= s->width>>1;
01269 const int height= s->height;
01270 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
01271 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
01272 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
01273 AVFrame * const p= &s->picture;
01274 int i, j, size=0;
01275
01276 *p = *pict;
01277 p->pict_type= AV_PICTURE_TYPE_I;
01278 p->key_frame= 1;
01279
01280 if(s->context){
01281 for(i=0; i<3; i++){
01282 generate_len_table(s->len[i], s->stats[i]);
01283 if(generate_bits_table(s->bits[i], s->len[i])<0)
01284 return -1;
01285 size+= store_table(s, s->len[i], &buf[size]);
01286 }
01287
01288 for(i=0; i<3; i++)
01289 for(j=0; j<256; j++)
01290 s->stats[i][j] >>= 1;
01291 }
01292
01293 init_put_bits(&s->pb, buf+size, buf_size-size);
01294
01295 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
01296 int lefty, leftu, leftv, y, cy;
01297
01298 put_bits(&s->pb, 8, leftv= p->data[2][0]);
01299 put_bits(&s->pb, 8, lefty= p->data[0][1]);
01300 put_bits(&s->pb, 8, leftu= p->data[1][0]);
01301 put_bits(&s->pb, 8, p->data[0][0]);
01302
01303 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
01304 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
01305 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
01306
01307 encode_422_bitstream(s, 2, width-2);
01308
01309 if(s->predictor==MEDIAN){
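/* As in the decoder: the first line, the first line of the second field (when
 * interlaced) and the next 4 pixels are left predicted; everything after that
 * is median predicted. */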
01310 int lefttopy, lefttopu, lefttopv;
01311 cy=y=1;
01312 if(s->interlaced){
01313 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
01314 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
01315 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
01316
01317 encode_422_bitstream(s, 0, width);
01318 y++; cy++;
01319 }
01320
01321 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
01322 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
01323 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
01324
01325 encode_422_bitstream(s, 0, 4);
01326
01327 lefttopy= p->data[0][3];
01328 lefttopu= p->data[1][1];
01329 lefttopv= p->data[2][1];
01330 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
01331 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
01332 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
01333 encode_422_bitstream(s, 0, width-4);
01334 y++; cy++;
01335
01336 for(; y<height; y++,cy++){
01337 uint8_t *ydst, *udst, *vdst;
01338
01339 if(s->bitstream_bpp==12){
01340 while(2*cy > y){
01341 ydst= p->data[0] + p->linesize[0]*y;
01342 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
01343 encode_gray_bitstream(s, width);
01344 y++;
01345 }
01346 if(y>=height) break;
01347 }
01348 ydst= p->data[0] + p->linesize[0]*y;
01349 udst= p->data[1] + p->linesize[1]*cy;
01350 vdst= p->data[2] + p->linesize[2]*cy;
01351
01352 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
01353 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
01354 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
01355
01356 encode_422_bitstream(s, 0, width);
01357 }
01358 }else{
01359 for(cy=y=1; y<height; y++,cy++){
01360 uint8_t *ydst, *udst, *vdst;
01361
01362
01363 if(s->bitstream_bpp==12){
01364 ydst= p->data[0] + p->linesize[0]*y;
01365
01366 if(s->predictor == PLANE && s->interlaced < y){
01367 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
01368
01369 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
01370 }else{
01371 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
01372 }
01373 encode_gray_bitstream(s, width);
01374 y++;
01375 if(y>=height) break;
01376 }
01377
01378 ydst= p->data[0] + p->linesize[0]*y;
01379 udst= p->data[1] + p->linesize[1]*cy;
01380 vdst= p->data[2] + p->linesize[2]*cy;
01381
01382 if(s->predictor == PLANE && s->interlaced < cy){
01383 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
01384 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
01385 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
01386
01387 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
01388 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
01389 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
01390 }else{
01391 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
01392 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
01393 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
01394 }
01395
01396 encode_422_bitstream(s, 0, width);
01397 }
01398 }
01399 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
01400 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
01401 const int stride = -p->linesize[0];
01402 const int fake_stride = -fake_ystride;
01403 int y;
01404 int leftr, leftg, leftb, lefta;
01405
01406 put_bits(&s->pb, 8, lefta= data[A]);
01407 put_bits(&s->pb, 8, leftr= data[R]);
01408 put_bits(&s->pb, 8, leftg= data[G]);
01409 put_bits(&s->pb, 8, leftb= data[B]);
01410
01411 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb, &lefta);
01412 encode_bgra_bitstream(s, width-1, 4);
01413
01414 for(y=1; y<s->height; y++){
01415 uint8_t *dst = data + y*stride;
01416 if(s->predictor == PLANE && s->interlaced < y){
01417 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
01418 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
01419 }else{
01420 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
01421 }
01422 encode_bgra_bitstream(s, width, 4);
01423 }
01424 }else if(avctx->pix_fmt == PIX_FMT_RGB24){
01425 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
01426 const int stride = -p->linesize[0];
01427 const int fake_stride = -fake_ystride;
01428 int y;
01429 int leftr, leftg, leftb;
01430
01431 put_bits(&s->pb, 8, leftr= data[0]);
01432 put_bits(&s->pb, 8, leftg= data[1]);
01433 put_bits(&s->pb, 8, leftb= data[2]);
01434 put_bits(&s->pb, 8, 0);
01435
01436 sub_left_prediction_rgb24(s, s->temp[0], data+3, width-1, &leftr, &leftg, &leftb);
01437 encode_bgra_bitstream(s, width-1, 3);
01438
01439 for(y=1; y<s->height; y++){
01440 uint8_t *dst = data + y*stride;
01441 if(s->predictor == PLANE && s->interlaced < y){
01442 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*3);
01443 sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
01444 }else{
01445 sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
01446 }
01447 encode_bgra_bitstream(s, width, 3);
01448 }
01449 }else{
01450 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
01451 }
01452 emms_c();
01453
01454 size+= (put_bits_count(&s->pb)+31)/8;
01455 put_bits(&s->pb, 16, 0);
01456 put_bits(&s->pb, 15, 0);
01457 size/= 4;
01458
01459 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
01460 int j;
01461 char *p= avctx->stats_out;
01462 char *end= p + 1024*30;
01463 for(i=0; i<3; i++){
01464 for(j=0; j<256; j++){
01465 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
01466 p+= strlen(p);
01467 s->stats[i][j]= 0;
01468 }
01469 snprintf(p, end-p, "\n");
01470 p++;
01471 }
01472 } else
01473 avctx->stats_out[0] = '\0';
01474 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
01475 flush_put_bits(&s->pb);
01476 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
01477 }
01478
01479 s->picture_number++;
01480
01481 return size*4;
01482 }
01483
01484 static av_cold int encode_end(AVCodecContext *avctx)
01485 {
01486 HYuvContext *s = avctx->priv_data;
01487
01488 common_end(s);
01489
01490 av_freep(&avctx->extradata);
01491 av_freep(&avctx->stats_out);
01492
01493 return 0;
01494 }
01495 #endif
01496
01497 #if CONFIG_HUFFYUV_DECODER
01498 AVCodec ff_huffyuv_decoder = {
01499 .name = "huffyuv",
01500 .type = AVMEDIA_TYPE_VIDEO,
01501 .id = CODEC_ID_HUFFYUV,
01502 .priv_data_size = sizeof(HYuvContext),
01503 .init = decode_init,
01504 .close = decode_end,
01505 .decode = decode_frame,
01506 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
01507 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
01508 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
01509 };
01510 #endif
01511
01512 #if CONFIG_FFVHUFF_DECODER
01513 AVCodec ff_ffvhuff_decoder = {
01514 .name = "ffvhuff",
01515 .type = AVMEDIA_TYPE_VIDEO,
01516 .id = CODEC_ID_FFVHUFF,
01517 .priv_data_size = sizeof(HYuvContext),
01518 .init = decode_init,
01519 .close = decode_end,
01520 .decode = decode_frame,
01521 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
01522 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
01523 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
01524 };
01525 #endif
01526
01527 #if CONFIG_HUFFYUV_ENCODER
01528 AVCodec ff_huffyuv_encoder = {
01529 .name = "huffyuv",
01530 .type = AVMEDIA_TYPE_VIDEO,
01531 .id = CODEC_ID_HUFFYUV,
01532 .priv_data_size = sizeof(HYuvContext),
01533 .init = encode_init,
01534 .encode = encode_frame,
01535 .close = encode_end,
01536 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
01537 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
01538 };
01539 #endif
01540
01541 #if CONFIG_FFVHUFF_ENCODER
01542 AVCodec ff_ffvhuff_encoder = {
01543 .name = "ffvhuff",
01544 .type = AVMEDIA_TYPE_VIDEO,
01545 .id = CODEC_ID_FFVHUFF,
01546 .priv_data_size = sizeof(HYuvContext),
01547 .init = encode_init,
01548 .encode = encode_frame,
01549 .close = encode_end,
01550 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
01551 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
01552 };
01553 #endif