00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00031 #include "avcodec.h"
00032 #include "internal.h"
00033 #include "get_bits.h"
00034 #include "put_bits.h"
00035 #include "dsputil.h"
00036 #include "thread.h"
00037
/* Number of bits resolved per VLC table lookup. */
#define VLC_BITS 11

/* Byte offsets of the colour components inside a packed 32-bit RGB32
 * pixel; they depend on host endianness. */
#if HAVE_BIGENDIAN
#define B 3
#define G 2
#define R 1
#define A 0
#else
#define B 0
#define G 1
#define R 2
#define A 3
#endif
00051
/* Spatial prediction methods signalled in the huffyuv stream header. */
typedef enum Predictor{
    LEFT= 0,  ///< predict from the previous sample on the same line
    PLANE,    ///< left prediction plus the difference to the previous line
    MEDIAN,   ///< median of left, top and left+top-topleft
} Predictor;
00057
/* Codec-private state shared by the huffyuv/ffvhuff encoder and decoder. */
typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;          ///< spatial predictor in use
    GetBitContext gb;             ///< bit reader (decoder)
    PutBitContext pb;             ///< bit writer (encoder)
    int interlaced;               ///< prediction crosses 2 lines when set
    int decorrelate;              ///< RGB: code G, B-G, R-G instead of B, G, R
    int bitstream_bpp;            ///< bits per pixel of the coded stream (12/16/24/32)
    int version;                  ///< 0: no extradata, 1: legacy, 2: current
    int yuy2;                     ///< use packed YUY2 output (not implemented)
    int bgr32;                    ///< use 32-bit RGB output instead of 24-bit
    int width, height;
    int flags;                    ///< copy of avctx->flags
    int context;                  ///< per-frame (adaptive) Huffman tables when set
    int picture_number;
    int last_slice_end;           ///< first row not yet passed to draw_horiz_band
    uint8_t *temp[3];             ///< per-plane scratch line buffers
    uint64_t stats[3][256];       ///< symbol counts for table (re)generation
    uint8_t len[3][256];          ///< per-plane code lengths
    uint32_t bits[3][256];        ///< per-plane code values
    uint32_t pix_bgr_map[1<<VLC_BITS]; ///< joint-code index -> packed BGRA pixel
    VLC vlc[6];                   ///< [0..2] per-plane tables, [3..5] joint pair tables
    AVFrame picture;
    uint8_t *bitstream_buffer;    ///< byte-swapped copy of the input packet
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;
00085
#define classic_shift_luma_table_size 42
/* Run-length coded code lengths (format of read_len_table()) for the fixed
 * "classic" huffyuv luma tree, used when a file carries no tables in its
 * extradata.  Zero-padded so the bit reader may safely overread. */
static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0,
  0,0,0,0,0,0,0,0,
};
00093
#define classic_shift_chroma_table_size 59
/* Run-length coded code lengths for the fixed "classic" huffyuv chroma
 * tree; same format and padding as classic_shift_luma. */
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0,
  0,0,0,0,0,0,0,0,
};
00101
/* Code values for the classic luma tree, indexed by symbol; paired with the
 * lengths decoded from classic_shift_luma (see read_old_huffman_tables()). */
static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};
00120
/* Code values for the classic chroma tree, indexed by symbol; paired with
 * the lengths decoded from classic_shift_chroma. */
static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};
00139
00140 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int left){
00141 int i;
00142 if(w<32){
00143 for(i=0; i<w; i++){
00144 const int temp= src[i];
00145 dst[i]= temp - left;
00146 left= temp;
00147 }
00148 return left;
00149 }else{
00150 for(i=0; i<16; i++){
00151 const int temp= src[i];
00152 dst[i]= temp - left;
00153 left= temp;
00154 }
00155 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
00156 return src[w-1];
00157 }
00158 }
00159
00160 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){
00161 int i;
00162 int r,g,b,a;
00163 r= *red;
00164 g= *green;
00165 b= *blue;
00166 a= *alpha;
00167 for(i=0; i<FFMIN(w,4); i++){
00168 const int rt= src[i*4+R];
00169 const int gt= src[i*4+G];
00170 const int bt= src[i*4+B];
00171 const int at= src[i*4+A];
00172 dst[i*4+R]= rt - r;
00173 dst[i*4+G]= gt - g;
00174 dst[i*4+B]= bt - b;
00175 dst[i*4+A]= at - a;
00176 r = rt;
00177 g = gt;
00178 b = bt;
00179 a = at;
00180 }
00181 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
00182 *red= src[(w-1)*4+R];
00183 *green= src[(w-1)*4+G];
00184 *blue= src[(w-1)*4+B];
00185 *alpha= src[(w-1)*4+A];
00186 }
00187
00188 static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue){
00189 int i;
00190 int r,g,b;
00191 r= *red;
00192 g= *green;
00193 b= *blue;
00194 for(i=0; i<FFMIN(w,16); i++){
00195 const int rt= src[i*3+0];
00196 const int gt= src[i*3+1];
00197 const int bt= src[i*3+2];
00198 dst[i*3+0]= rt - r;
00199 dst[i*3+1]= gt - g;
00200 dst[i*3+2]= bt - b;
00201 r = rt;
00202 g = gt;
00203 b = bt;
00204 }
00205 s->dsp.diff_bytes(dst+48, src+48, src+48-3, w*3-48);
00206 *red= src[(w-1)*3+0];
00207 *green= src[(w-1)*3+1];
00208 *blue= src[(w-1)*3+2];
00209 }
00210
00211 static int read_len_table(uint8_t *dst, GetBitContext *gb){
00212 int i, val, repeat;
00213
00214 for(i=0; i<256;){
00215 repeat= get_bits(gb, 3);
00216 val = get_bits(gb, 5);
00217 if(repeat==0)
00218 repeat= get_bits(gb, 8);
00219
00220 if(i+repeat > 256 || get_bits_left(gb) < 0) {
00221 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
00222 return -1;
00223 }
00224 while (repeat--)
00225 dst[i++] = val;
00226 }
00227 return 0;
00228 }
00229
00230 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
00231 int len, index;
00232 uint32_t bits=0;
00233
00234 for(len=32; len>0; len--){
00235 for(index=0; index<256; index++){
00236 if(len_table[index]==len)
00237 dst[index]= bits++;
00238 }
00239 if(bits & 1){
00240 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
00241 return -1;
00242 }
00243 bits >>= 1;
00244 }
00245 return 0;
00246 }
00247
00248 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Min-heap node used while building Huffman code lengths: `val` is the
 * (biased) occurrence count, `name` the symbol or merged-node index. */
typedef struct {
    uint64_t val;
    int name;
} HeapElem;
00253
00254 static void heap_sift(HeapElem *h, int root, int size)
00255 {
00256 while(root*2+1 < size) {
00257 int child = root*2+1;
00258 if(child < size-1 && h[child].val > h[child+1].val)
00259 child++;
00260 if(h[root].val > h[child].val) {
00261 FFSWAP(HeapElem, h[root], h[child]);
00262 root = child;
00263 } else
00264 break;
00265 }
00266 }
00267
/* Build Huffman code lengths from symbol statistics using a min-heap.
 * If any resulting length reaches 32 bits, the whole pass is retried with
 * a doubled bias (`offset`) added to every count, which flattens the
 * distribution until all lengths fit. */
static void generate_len_table(uint8_t *dst, const uint64_t *stats){
    HeapElem h[256];
    int up[2*256];   /* parent links: leaf/internal node -> merged parent */
    int len[2*256];  /* depth of each internal node, root depth 0 */
    int offset, i, next;
    int size = 256;

    for(offset=1; ; offset<<=1){
        /* counts are shifted left so the bias stays below one real count */
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        /* heapify */
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        /* repeatedly merge the two least frequent nodes */
        for(next=size; next<size*2-1; next++){
            /* pop the minimum, record its parent */
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            /* replace the new minimum with the merged node */
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        /* walk from the root down to derive depths, then leaf lengths */
        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break; /* too deep: retry with larger bias */
        }
        if(i==size) break;
    }
}
00305 #endif
00306
/* Build the "joint" VLC tables (vlc[3..5]) that decode two (YUV) or three
 * (RGB) symbols in a single table lookup, for all symbol combinations whose
 * concatenated code fits into VLC_BITS bits.  READ_2PIX()/decode_bgr_1()
 * fall back to the per-plane tables when a combination is absent. */
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        /* YUV: pair a luma symbol (plane 0) with a symbol of plane p */
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    /* 0xffff is reserved as the "not in joint table"
                     * sentinel used by READ_2PIX() */
                    if(symbols[i] != 0xffff)
                        i++;
                }
            }
            ff_free_vlc(&s->vlc[3+p]);
            ff_init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        /* RGB: triples of residuals; the decoded pixel is looked up via
         * pix_bgr_map, so only residuals in [-16,16) are joint-coded */
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        /* with decorrelation G is coded with plane 1's tree and B with
         * plane 0's; without it the roles are swapped */
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;

        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        /* B and R residuals are relative to G */
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        ff_free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}
00375
00376 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
00377 GetBitContext gb;
00378 int i;
00379
00380 init_get_bits(&gb, src, length*8);
00381
00382 for(i=0; i<3; i++){
00383 if(read_len_table(s->len[i], &gb)<0)
00384 return -1;
00385 if(generate_bits_table(s->bits[i], s->len[i])<0){
00386 return -1;
00387 }
00388 ff_free_vlc(&s->vlc[i]);
00389 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
00390 }
00391
00392 generate_joint_tables(s);
00393
00394 return (get_bits_count(&gb)+7)/8;
00395 }
00396
00397 static int read_old_huffman_tables(HYuvContext *s){
00398 GetBitContext gb;
00399 int i;
00400
00401 init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
00402 if(read_len_table(s->len[0], &gb)<0)
00403 return -1;
00404 init_get_bits(&gb, classic_shift_chroma, classic_shift_chroma_table_size*8);
00405 if(read_len_table(s->len[1], &gb)<0)
00406 return -1;
00407
00408 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
00409 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
00410
00411 if(s->bitstream_bpp >= 24){
00412 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
00413 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
00414 }
00415 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
00416 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
00417
00418 for(i=0; i<3; i++){
00419 ff_free_vlc(&s->vlc[i]);
00420 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
00421 }
00422
00423 generate_joint_tables(s);
00424
00425 return 0;
00426 }
00427
00428 static av_cold void alloc_temp(HYuvContext *s){
00429 int i;
00430
00431 if(s->bitstream_bpp<24){
00432 for(i=0; i<3; i++){
00433 s->temp[i]= av_malloc(s->width + 16);
00434 }
00435 }else{
00436 s->temp[0]= av_mallocz(4*s->width + 16);
00437 }
00438 }
00439
/* Initialization shared by encoder and decoder: cache the codec context
 * and flags, set up the DSP helpers and copy the frame dimensions.
 * Always returns 0. */
static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    ff_dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}
00454
00455 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: derive stream version and prediction parameters from the
 * extradata (or fall back to historical defaults), build the Huffman
 * tables and select the output pixel format. */
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    /* only the three per-plane VLCs are cleared here; the joint tables
     * vlc[3..5] rely on priv_data being zero-initialized by the caller */
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    avcodec_get_frame_defaults(&s->picture);
    /* historical heuristic: frames taller than 288 lines default to
     * interlaced unless the extradata overrides it below */
    s->interlaced= s->height > 288;

    s->bgr32=1;

    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1;
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        if (avctx->extradata_size < 4)
            return -1;

        /* extradata byte 0: predictor in the low 6 bits, decorrelate in
         * bit 6; byte 1: coded bits per pixel (0 = use
         * bits_per_coded_sample); byte 2: interlace override in bits 4-5
         * and the per-frame-tables ("context") flag in bit 6 */
        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        /* tables start after the 4-byte header */
        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
            return -1;
    }else{
        /* legacy files: method lives in the low 3 bits of
         * bits_per_coded_sample, tables are the fixed classic ones */
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        return AVERROR_INVALIDDATA;
    }

    /* subsampled chroma requires an even luma width */
    if ((avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P) && avctx->width & 1) {
        av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
        return AVERROR_INVALIDDATA;
    }

    alloc_temp(s);

    return 0;
}
00560
00561 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
00562 {
00563 HYuvContext *s = avctx->priv_data;
00564 int i;
00565
00566 avctx->coded_frame= &s->picture;
00567 alloc_temp(s);
00568
00569 for (i = 0; i < 6; i++)
00570 s->vlc[i].table = NULL;
00571
00572 if(s->version==2){
00573 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
00574 return -1;
00575 }else{
00576 if(read_old_huffman_tables(s) < 0)
00577 return -1;
00578 }
00579
00580 return 0;
00581 }
00582 #endif
00583
00584 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
00585 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
00586 int i;
00587 int index= 0;
00588
00589 for(i=0; i<256;){
00590 int val= len[i];
00591 int repeat=0;
00592
00593 for(; i<256 && len[i]==val && repeat<255; i++)
00594 repeat++;
00595
00596 assert(val < 32 && val >0 && repeat<256 && repeat>0);
00597 if(repeat>7){
00598 buf[index++]= val;
00599 buf[index++]= repeat;
00600 }else{
00601 buf[index++]= val | (repeat<<5);
00602 }
00603 }
00604
00605 return index;
00606 }
00607
00608 static av_cold int encode_init(AVCodecContext *avctx)
00609 {
00610 HYuvContext *s = avctx->priv_data;
00611 int i, j;
00612
00613 common_init(avctx);
00614
00615 avctx->extradata= av_mallocz(1024*30);
00616 avctx->stats_out= av_mallocz(1024*30);
00617 s->version=2;
00618
00619 avctx->coded_frame= &s->picture;
00620
00621 switch(avctx->pix_fmt){
00622 case PIX_FMT_YUV420P:
00623 case PIX_FMT_YUV422P:
00624 if (s->width & 1) {
00625 av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
00626 return AVERROR(EINVAL);
00627 }
00628 s->bitstream_bpp = avctx->pix_fmt == PIX_FMT_YUV420P ? 12 : 16;
00629 break;
00630 case PIX_FMT_RGB32:
00631 s->bitstream_bpp= 32;
00632 break;
00633 case PIX_FMT_RGB24:
00634 s->bitstream_bpp= 24;
00635 break;
00636 default:
00637 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
00638 return -1;
00639 }
00640 avctx->bits_per_coded_sample= s->bitstream_bpp;
00641 s->decorrelate= s->bitstream_bpp >= 24;
00642 s->predictor= avctx->prediction_method;
00643 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
00644 if(avctx->context_model==1){
00645 s->context= avctx->context_model;
00646 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
00647 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
00648 return -1;
00649 }
00650 }else s->context= 0;
00651
00652 if(avctx->codec->id==CODEC_ID_HUFFYUV){
00653 if(avctx->pix_fmt==PIX_FMT_YUV420P){
00654 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
00655 return -1;
00656 }
00657 if(avctx->context_model){
00658 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
00659 return -1;
00660 }
00661 if(s->interlaced != ( s->height > 288 ))
00662 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
00663 }
00664
00665 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
00666 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
00667 return -1;
00668 }
00669
00670 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
00671 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
00672 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
00673 if(s->context)
00674 ((uint8_t*)avctx->extradata)[2]|= 0x40;
00675 ((uint8_t*)avctx->extradata)[3]= 0;
00676 s->avctx->extradata_size= 4;
00677
00678 if(avctx->stats_in){
00679 char *p= avctx->stats_in;
00680
00681 for(i=0; i<3; i++)
00682 for(j=0; j<256; j++)
00683 s->stats[i][j]= 1;
00684
00685 for(;;){
00686 for(i=0; i<3; i++){
00687 char *next;
00688
00689 for(j=0; j<256; j++){
00690 s->stats[i][j]+= strtol(p, &next, 0);
00691 if(next==p) return -1;
00692 p=next;
00693 }
00694 }
00695 if(p[0]==0 || p[1]==0 || p[2]==0) break;
00696 }
00697 }else{
00698 for(i=0; i<3; i++)
00699 for(j=0; j<256; j++){
00700 int d= FFMIN(j, 256-j);
00701
00702 s->stats[i][j]= 100000000/(d+1);
00703 }
00704 }
00705
00706 for(i=0; i<3; i++){
00707 generate_len_table(s->len[i], s->stats[i]);
00708
00709 if(generate_bits_table(s->bits[i], s->len[i])<0){
00710 return -1;
00711 }
00712
00713 s->avctx->extradata_size+=
00714 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
00715 }
00716
00717 if(s->context){
00718 for(i=0; i<3; i++){
00719 int pels = s->width*s->height / (i?40:10);
00720 for(j=0; j<256; j++){
00721 int d= FFMIN(j, 256-j);
00722 s->stats[i][j]= pels/(d+1);
00723 }
00724 }
00725 }else{
00726 for(i=0; i<3; i++)
00727 for(j=0; j<256; j++)
00728 s->stats[i][j]= 0;
00729 }
00730
00731
00732
00733 alloc_temp(s);
00734
00735 s->picture_number=0;
00736
00737 return 0;
00738 }
00739 #endif
00740
00741
00742
/* Decode two symbols in one step: try the joint table for plane pair
 * (0, plane1) first — one lookup yields both values packed into 16 bits.
 * 0xffff is the "pair not in joint table" sentinel (never emitted by
 * generate_joint_tables()), in which case fall back to two single-symbol
 * lookups from the per-plane tables. */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}
00753
00754 static void decode_422_bitstream(HYuvContext *s, int count){
00755 int i;
00756
00757 count/=2;
00758
00759 if(count >= (get_bits_left(&s->gb))/(31*4)){
00760 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
00761 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
00762 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
00763 }
00764 }else{
00765 for(i=0; i<count; i++){
00766 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
00767 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
00768 }
00769 }
00770 }
00771
00772 static void decode_gray_bitstream(HYuvContext *s, int count){
00773 int i;
00774
00775 count/=2;
00776
00777 if(count >= (get_bits_left(&s->gb))/(31*2)){
00778 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
00779 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
00780 }
00781 }else{
00782 for(i=0; i<count; i++){
00783 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
00784 }
00785 }
00786 }
00787
00788 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Huffman-code one line of 4:2:2 residuals from s->temp[] into the bit
 * writer.  `offset` is in luma samples (chroma is at offset/2); `count`
 * is the number of luma samples.  Returns 0 on success, -1 if the output
 * buffer cannot hold the worst case. */
static int encode_422_bitstream(HYuvContext *s, int offset, int count){
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset/2;
    const uint8_t *v = s->temp[2] + offset/2;

    /* conservative worst-case space check before writing anything */
    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* one iteration handles two luma samples and one chroma pair */
#define LOAD4\
            int y0 = y[2*i];\
            int y1 = y[2*i+1];\
            int u0 = u[i];\
            int v0 = v[i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        /* first pass: only gather symbol statistics */
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        /* adaptive tables: keep updating stats while writing */
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}
00841
/* Huffman-code one line of luma-only residuals from s->temp[0] (the
 * chroma-less lines of 4:2:0 streams).  Returns 0 on success, -1 if the
 * output buffer cannot hold the worst case. */
static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    /* conservative worst-case space check before writing anything */
    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* one iteration handles two luma samples */
#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        /* first pass: only gather symbol statistics */
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        /* adaptive tables: keep updating stats while writing */
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
00884 #endif
00885
/* Decode `count` packed 32-bit pixels into s->temp[0].  `decorrelate` and
 * `alpha` are compile-time constants at every call site (see
 * decode_bgr_bitstream()), so the always-inline expansion yields four
 * specialized loops with the branches folded away. */
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        /* joint G+B+R table first; -1 marks a triplet it does not cover */
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            /* joint hit: the whole pixel comes from the precomputed map
             * (32-bit store through a cast; temp[0] is 4-byte indexed) */
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            /* B and R are coded as differences to G */
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            /* alpha reuses the plane-2 tree, matching encode_bgra_bitstream() */
            s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
    }
}
00905
00906 static void decode_bgr_bitstream(HYuvContext *s, int count){
00907 if(s->decorrelate){
00908 if(s->bitstream_bpp==24)
00909 decode_bgr_1(s, count, 1, 0);
00910 else
00911 decode_bgr_1(s, count, 1, 1);
00912 }else{
00913 if(s->bitstream_bpp==24)
00914 decode_bgr_1(s, count, 0, 0);
00915 else
00916 decode_bgr_1(s, count, 0, 1);
00917 }
00918 }
00919
/* Huffman-code `count` packed RGB residuals from s->temp[0].  `planes` is
 * 3 for RGB24 and 4 for RGB32 (with alpha).  G is coded with the plane-1
 * tree, B-G with plane 0, R-G with plane 2; alpha reuses plane 2.
 * Returns 0 on success, -1 if the output buffer cannot hold the worst
 * case. */
static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes){
    int i;

    /* conservative worst-case space check before writing anything */
    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*planes*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* NOTE(review): for planes==3 `a` loads temp[0][3*i+3] (the next pixel's
 * first byte) but is never used since planes!=4 — harmless, relying on the
 * buffer padding for the last pixel. */
#define LOAD3\
            int g= s->temp[0][planes==3 ? 3*i+1 : 4*i+G];\
            int b= (s->temp[0][planes==3 ? 3*i+2 : 4*i+B] - g) & 0xff;\
            int r= (s->temp[0][planes==3 ? 3*i+0 : 4*i+R] - g) & 0xff;\
            int a= s->temp[0][planes*i+A];
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;\
            if(planes==4) s->stats[2][a]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
            if(planes==4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        /* first pass only: gather stats, emit nothing */
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        /* adaptive tables and/or pass 1 with output: stats + bits */
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}
00963
00964 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
00965 static void draw_slice(HYuvContext *s, int y){
00966 int h, cy, i;
00967 int offset[AV_NUM_DATA_POINTERS];
00968
00969 if(s->avctx->draw_horiz_band==NULL)
00970 return;
00971
00972 h= y - s->last_slice_end;
00973 y -= h;
00974
00975 if(s->bitstream_bpp==12){
00976 cy= y>>1;
00977 }else{
00978 cy= y;
00979 }
00980
00981 offset[0] = s->picture.linesize[0]*y;
00982 offset[1] = s->picture.linesize[1]*cy;
00983 offset[2] = s->picture.linesize[2]*cy;
00984 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
00985 offset[i] = 0;
00986 emms_c();
00987
00988 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
00989
00990 s->last_slice_end= y + h;
00991 }
00992
00993 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
00994 const uint8_t *buf = avpkt->data;
00995 int buf_size = avpkt->size;
00996 HYuvContext *s = avctx->priv_data;
00997 const int width= s->width;
00998 const int width2= s->width>>1;
00999 const int height= s->height;
01000 int fake_ystride, fake_ustride, fake_vstride;
01001 AVFrame * const p= &s->picture;
01002 int table_size= 0;
01003
01004 AVFrame *picture = data;
01005
01006 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
01007 if (!s->bitstream_buffer)
01008 return AVERROR(ENOMEM);
01009
01010 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
01011 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
01012
01013 if(p->data[0])
01014 ff_thread_release_buffer(avctx, p);
01015
01016 p->reference= 0;
01017 if(ff_thread_get_buffer(avctx, p) < 0){
01018 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
01019 return -1;
01020 }
01021
01022 if(s->context){
01023 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
01024 if(table_size < 0)
01025 return -1;
01026 }
01027
01028 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
01029 return -1;
01030
01031 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
01032
01033 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
01034 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
01035 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
01036
01037 s->last_slice_end= 0;
01038
01039 if(s->bitstream_bpp<24){
01040 int y, cy;
01041 int lefty, leftu, leftv;
01042 int lefttopy, lefttopu, lefttopv;
01043
01044 if(s->yuy2){
01045 p->data[0][3]= get_bits(&s->gb, 8);
01046 p->data[0][2]= get_bits(&s->gb, 8);
01047 p->data[0][1]= get_bits(&s->gb, 8);
01048 p->data[0][0]= get_bits(&s->gb, 8);
01049
01050 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
01051 return -1;
01052 }else{
01053
01054 leftv= p->data[2][0]= get_bits(&s->gb, 8);
01055 lefty= p->data[0][1]= get_bits(&s->gb, 8);
01056 leftu= p->data[1][0]= get_bits(&s->gb, 8);
01057 p->data[0][0]= get_bits(&s->gb, 8);
01058
01059 switch(s->predictor){
01060 case LEFT:
01061 case PLANE:
01062 decode_422_bitstream(s, width-2);
01063 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
01064 if(!(s->flags&CODEC_FLAG_GRAY)){
01065 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
01066 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
01067 }
01068
01069 for(cy=y=1; y<s->height; y++,cy++){
01070 uint8_t *ydst, *udst, *vdst;
01071
01072 if(s->bitstream_bpp==12){
01073 decode_gray_bitstream(s, width);
01074
01075 ydst= p->data[0] + p->linesize[0]*y;
01076
01077 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
01078 if(s->predictor == PLANE){
01079 if(y>s->interlaced)
01080 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
01081 }
01082 y++;
01083 if(y>=s->height) break;
01084 }
01085
01086 draw_slice(s, y);
01087
01088 ydst= p->data[0] + p->linesize[0]*y;
01089 udst= p->data[1] + p->linesize[1]*cy;
01090 vdst= p->data[2] + p->linesize[2]*cy;
01091
01092 decode_422_bitstream(s, width);
01093 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
01094 if(!(s->flags&CODEC_FLAG_GRAY)){
01095 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
01096 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
01097 }
01098 if(s->predictor == PLANE){
01099 if(cy>s->interlaced){
01100 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
01101 if(!(s->flags&CODEC_FLAG_GRAY)){
01102 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
01103 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
01104 }
01105 }
01106 }
01107 }
01108 draw_slice(s, height);
01109
01110 break;
01111 case MEDIAN:
01112
01113 decode_422_bitstream(s, width-2);
01114 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
01115 if(!(s->flags&CODEC_FLAG_GRAY)){
01116 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
01117 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
01118 }
01119
01120 cy=y=1;
01121
01122
01123 if(s->interlaced){
01124 decode_422_bitstream(s, width);
01125 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
01126 if(!(s->flags&CODEC_FLAG_GRAY)){
01127 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
01128 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
01129 }
01130 y++; cy++;
01131 }
01132
01133
01134 decode_422_bitstream(s, 4);
01135 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
01136 if(!(s->flags&CODEC_FLAG_GRAY)){
01137 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
01138 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
01139 }
01140
01141
01142 lefttopy= p->data[0][3];
01143 decode_422_bitstream(s, width-4);
01144 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
01145 if(!(s->flags&CODEC_FLAG_GRAY)){
01146 lefttopu= p->data[1][1];
01147 lefttopv= p->data[2][1];
01148 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
01149 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
01150 }
01151 y++; cy++;
01152
01153 for(; y<height; y++,cy++){
01154 uint8_t *ydst, *udst, *vdst;
01155
01156 if(s->bitstream_bpp==12){
01157 while(2*cy > y){
01158 decode_gray_bitstream(s, width);
01159 ydst= p->data[0] + p->linesize[0]*y;
01160 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
01161 y++;
01162 }
01163 if(y>=height) break;
01164 }
01165 draw_slice(s, y);
01166
01167 decode_422_bitstream(s, width);
01168
01169 ydst= p->data[0] + p->linesize[0]*y;
01170 udst= p->data[1] + p->linesize[1]*cy;
01171 vdst= p->data[2] + p->linesize[2]*cy;
01172
01173 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
01174 if(!(s->flags&CODEC_FLAG_GRAY)){
01175 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
01176 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
01177 }
01178 }
01179
01180 draw_slice(s, height);
01181 break;
01182 }
01183 }
01184 }else{
01185 int y;
01186 int leftr, leftg, leftb, lefta;
01187 const int last_line= (height-1)*p->linesize[0];
01188
01189 if(s->bitstream_bpp==32){
01190 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
01191 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
01192 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
01193 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
01194 }else{
01195 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
01196 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
01197 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
01198 lefta= p->data[0][last_line+A]= 255;
01199 skip_bits(&s->gb, 8);
01200 }
01201
01202 if(s->bgr32){
01203 switch(s->predictor){
01204 case LEFT:
01205 case PLANE:
01206 decode_bgr_bitstream(s, width-1);
01207 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
01208
01209 for(y=s->height-2; y>=0; y--){
01210 decode_bgr_bitstream(s, width);
01211
01212 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
01213 if(s->predictor == PLANE){
01214 if(s->bitstream_bpp!=32) lefta=0;
01215 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
01216 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
01217 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
01218 }
01219 }
01220 }
01221 draw_slice(s, height);
01222 break;
01223 default:
01224 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
01225 }
01226 }else{
01227
01228 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
01229 return -1;
01230 }
01231 }
01232 emms_c();
01233
01234 *picture= *p;
01235 *data_size = sizeof(AVFrame);
01236
01237 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
01238 }
01239 #endif
01240
01241 static int common_end(HYuvContext *s){
01242 int i;
01243
01244 for(i=0; i<3; i++){
01245 av_freep(&s->temp[i]);
01246 }
01247 return 0;
01248 }
01249
01250 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
01251 static av_cold int decode_end(AVCodecContext *avctx)
01252 {
01253 HYuvContext *s = avctx->priv_data;
01254 int i;
01255
01256 if (s->picture.data[0])
01257 avctx->release_buffer(avctx, &s->picture);
01258
01259 common_end(s);
01260 av_freep(&s->bitstream_buffer);
01261
01262 for(i=0; i<6; i++){
01263 ff_free_vlc(&s->vlc[i]);
01264 }
01265
01266 return 0;
01267 }
01268 #endif
01269
01270 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Encode one frame.
 *
 * Each plane is run through the configured predictor (LEFT, PLANE or
 * MEDIAN) and the residuals are Huffman-coded.  Planar YUV, RGB32 and
 * RGB24 inputs take separate paths below; the first few samples of every
 * frame are stored raw to seed the left predictors.
 *
 * @param avctx      codec context (HYuvContext in priv_data)
 * @param pkt        output packet, allocated here via ff_alloc_packet2()
 * @param pict       input picture; not modified
 * @param got_packet set to 1 once pkt holds a finished packet
 * @return 0 on success, negative error code on failure
 */
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;  /* chroma width (4:2:2 / 4:2:0) */
    const int height= s->height;
    /* Interlaced material predicts each field from its own previous line,
     * i.e. the row two linesizes up. */
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size = 0, ret;  /* size counts bytes while tables are stored, words at the end */

    /* Worst-case output: raw 32bpp plus standard headroom. */
    if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
        return ret;

    *p = *pict;
    p->pict_type= AV_PICTURE_TYPE_I;  /* every huffyuv frame is intra */
    p->key_frame= 1;

    if(s->context){
        /* Adaptive-context mode: rebuild the Huffman tables from the
         * accumulated symbol statistics and write them into the packet
         * ahead of the bitstream. */
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i]);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size += store_table(s, s->len[i], &pkt->data[size]);
        }

        /* Halve the statistics so recent frames dominate the adaptation. */
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        /* First four samples stored raw; they seed the left predictors.
         * NOTE(review): byte order (V, Y1, U, Y0) must match what the
         * decoder reads back — verify against decode_frame. */
        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        /* Left-predict the remainder of line 0 for all planes. */
        lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width-2);  /* skip the 2 raw luma samples */

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                /* Second line of the first field still has no line above
                 * it within its own field: left prediction only. */
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            /* First few samples of the next row are left-predicted before
             * the median predictor takes over (it needs a top-left pixel). */
            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            /* Seed the top-left values, then median-predict the rest of
             * the row against the line above. */
            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    /* 4:2:0: luma has twice as many rows as chroma, so emit
                     * extra luma-only rows until y catches up with 2*cy. */
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        }else{
            /* LEFT / PLANE predictors. */
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* 4:2:0: interleave one luma-only row per luma/chroma row. */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        /* PLANE: vertical diff against the row above, then
                         * left-predict the vertical residual. */
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    /* temp[2] holds U in its first half and V after width2. */
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        /* RGB is stored bottom-up in the bitstream: start at the last line
         * and walk upward with a negative stride. */
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb, lefta;

        /* First pixel raw, seeding the per-channel left predictors. */
        put_bits(&s->pb, 8, lefta= data[A]);
        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width-1, 4);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
            }
            encode_bgra_bitstream(s, width, 4);
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB24){
        /* Same bottom-up layout as RGB32 but 3 bytes/pixel, no alpha. */
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr= data[0]);
        put_bits(&s->pb, 8, leftg= data[1]);
        put_bits(&s->pb, 8, leftb= data[2]);
        put_bits(&s->pb, 8, 0);  /* placeholder keeps the 4-byte raw header */

        sub_left_prediction_rgb24(s, s->temp[0], data+3, width-1, &leftr, &leftg, &leftb);
        encode_bgra_bitstream(s, width-1, 3);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgra_bitstream(s, width, 3);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    /* Round the bit count up to whole 32-bit words; the 31 zero padding
     * bits below guarantee the final word is fully written. */
    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size/= 4;  /* from here on, size is in 32-bit words */

    /* Two-pass rate control: dump the symbol statistics every 32 frames. */
    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;  /* NOTE(review): must match the stats_out allocation size — confirm in encode_init */
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        /* The huffyuv bitstream is stored as big-endian 32-bit words. */
        s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
    }

    s->picture_number++;

    pkt->size = size*4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
01498
01499 static av_cold int encode_end(AVCodecContext *avctx)
01500 {
01501 HYuvContext *s = avctx->priv_data;
01502
01503 common_end(s);
01504
01505 av_freep(&avctx->extradata);
01506 av_freep(&avctx->stats_out);
01507
01508 return 0;
01509 }
01510 #endif
01511
01512 #if CONFIG_HUFFYUV_DECODER
/* Decoder registration for the original Huffyuv codec. */
AVCodec ff_huffyuv_decoder = {
    .name = "huffyuv",
    .type = AVMEDIA_TYPE_VIDEO,
    .id = CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init = decode_init,
    .close = decode_end,
    .decode = decode_frame,
    /* Supports direct rendering, per-slice draw callbacks and
     * frame-level multithreading. */
    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                    CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
01526 #endif
01527
01528 #if CONFIG_FFVHUFF_DECODER
/* Decoder registration for FFVHUFF, the FFmpeg variant of Huffyuv;
 * shares every callback with the huffyuv decoder above. */
AVCodec ff_ffvhuff_decoder = {
    .name = "ffvhuff",
    .type = AVMEDIA_TYPE_VIDEO,
    .id = CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init = decode_init,
    .close = decode_end,
    .decode = decode_frame,
    /* Supports direct rendering, per-slice draw callbacks and
     * frame-level multithreading. */
    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                    CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
01542 #endif
01543
01544 #if CONFIG_HUFFYUV_ENCODER
/* Encoder registration for the original Huffyuv codec.  Note: YUV420P is
 * deliberately absent here; it is only offered by the ffvhuff variant. */
AVCodec ff_huffyuv_encoder = {
    .name = "huffyuv",
    .type = AVMEDIA_TYPE_VIDEO,
    .id = CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init = encode_init,
    .encode2 = encode_frame,
    .close = encode_end,
    .pix_fmts = (const enum PixelFormat[]){
        PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE
    },
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
01558 #endif
01559
01560 #if CONFIG_FFVHUFF_ENCODER
/* Encoder registration for FFVHUFF; identical to the huffyuv encoder
 * except that it additionally accepts YUV420P input. */
AVCodec ff_ffvhuff_encoder = {
    .name = "ffvhuff",
    .type = AVMEDIA_TYPE_VIDEO,
    .id = CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init = encode_init,
    .encode2 = encode_frame,
    .close = encode_end,
    .pix_fmts = (const enum PixelFormat[]){
        PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE
    },
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
01574 #endif