#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "put_bits.h"
#include "dsputil.h"
#include "thread.h"
#include "huffman.h"

#define VLC_BITS 11

#if HAVE_BIGENDIAN
#define B 3
#define G 2
#define R 1
#define A 0
#else
#define B 0
#define G 1
#define R 2
#define A 3
#endif

typedef enum Predictor {
    LEFT = 0,
    PLANE,
    MEDIAN,
} Predictor;

typedef struct HYuvContext {
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;
    int bgr32;
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    uint32_t pix_bgr_map[1<<VLC_BITS];
    VLC vlc[6];
    AVFrame picture;
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
} HYuvContext;

#define classic_shift_luma_table_size 42
static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
    34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
    16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
    69,68, 0,
    0,0,0,0,0,0,0,0,
};

#define classic_shift_chroma_table_size 59
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
    66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
    56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
    214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0,
    0,0,0,0,0,0,0,0,
};

static const unsigned char classic_add_luma[256] = {
    3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
    73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
    68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
    35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
    37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
    35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
    27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
    15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
    12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
    12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
    18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
    28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
    28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
    62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
    54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
    46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
};

static const unsigned char classic_add_chroma[256] = {
    3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
    7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
    11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
    43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
    143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
    80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
    17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
    112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
    135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
    52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
    19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
    83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
    14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
    6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
};

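/*
 * Left-predict a row: dst[i] = src[i] - previous pixel, returning the last
 * source pixel as the new "left" value.  The first up to 16 pixels are
 * handled in a scalar loop so the DSP routine only sees the remainder;
 * rows narrower than 32 pixels are processed entirely in C.
 */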
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
                                      const uint8_t *src, int w, int left)
{
    int i;
    if (w < 32) {
        for (i = 0; i < w; i++) {
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        return left;
    } else {
        for (i = 0; i < 16; i++) {
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        s->dsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
        return src[w-1];
    }
}

static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
                                             const uint8_t *src, int w,
                                             int *red, int *green, int *blue, int *alpha)
{
    int i;
    int r, g, b, a;
    r = *red;
    g = *green;
    b = *blue;
    a = *alpha;
    for (i = 0; i < FFMIN(w, 4); i++) {
        const int rt = src[i * 4 + R];
        const int gt = src[i * 4 + G];
        const int bt = src[i * 4 + B];
        const int at = src[i * 4 + A];
        dst[i * 4 + R] = rt - r;
        dst[i * 4 + G] = gt - g;
        dst[i * 4 + B] = bt - b;
        dst[i * 4 + A] = at - a;
        r = rt;
        g = gt;
        b = bt;
        a = at;
    }

    s->dsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);

    *red   = src[(w - 1) * 4 + R];
    *green = src[(w - 1) * 4 + G];
    *blue  = src[(w - 1) * 4 + B];
    *alpha = src[(w - 1) * 4 + A];
}

static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
                                             const uint8_t *src, int w,
                                             int *red, int *green, int *blue)
{
    int i;
    int r, g, b;
    r = *red;
    g = *green;
    b = *blue;
    for (i = 0; i < FFMIN(w, 16); i++) {
        const int rt = src[i * 3 + 0];
        const int gt = src[i * 3 + 1];
        const int bt = src[i * 3 + 2];
        dst[i * 3 + 0] = rt - r;
        dst[i * 3 + 1] = gt - g;
        dst[i * 3 + 2] = bt - b;
        r = rt;
        g = gt;
        b = bt;
    }

    s->dsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);

    *red   = src[(w - 1) * 3 + 0];
    *green = src[(w - 1) * 3 + 1];
    *blue  = src[(w - 1) * 3 + 2];
}

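/*
 * Read a run-length coded table of Huffman code lengths: each record is a
 * 3-bit repeat count followed by a 5-bit length value; a repeat count of 0
 * means an explicit 8-bit count follows.  Exactly 256 entries are filled.
 */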
static int read_len_table(uint8_t *dst, GetBitContext *gb)
{
    int i, val, repeat;

    for (i = 0; i < 256;) {
        repeat = get_bits(gb, 3);
        val    = get_bits(gb, 5);
        if (repeat == 0)
            repeat = get_bits(gb, 8);
        if (i + repeat > 256 || get_bits_left(gb) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
            return -1;
        }
        while (repeat--)
            dst[i++] = val;
    }
    return 0;
}

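/*
 * Assign canonical Huffman codes from the code lengths.  Codes are handed
 * out from the longest length down to the shortest; after each length the
 * running code value must be even (codes have to pair up one level higher
 * in the tree), otherwise the length table does not describe a valid
 * prefix code.
 */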
static int generate_bits_table(uint32_t *dst, const uint8_t *len_table)
{
    int len, index;
    uint32_t bits = 0;

    for (len = 32; len > 0; len--) {
        for (index = 0; index < 256; index++) {
            if (len_table[index] == len)
                dst[index] = bits++;
        }
        if (bits & 1) {
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}

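/*
 * Build joint VLC tables so that common symbol combinations can be decoded
 * with a single lookup.  For YUV streams this pairs a luma symbol with a
 * second symbol from plane p whenever their combined length fits into
 * VLC_BITS; the pair is stored as (y << 8) | u, with 0xffff reserved as the
 * escape value used by READ_2PIX.  For RGB streams a single table covers
 * (g, b, r) triplets whose residuals lie in -16..15; the reconstructed pixel
 * bytes (with green added back when decorrelation is active) are
 * precomputed into pix_bgr_map.
 */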
static void generate_joint_tables(HYuvContext *s)
{
    uint16_t symbols[1 << VLC_BITS];
    uint16_t bits[1 << VLC_BITS];
    uint8_t len[1 << VLC_BITS];
    if (s->bitstream_bpp < 24) {
        int p, i, y, u;
        for (p = 0; p < 3; p++) {
            for (i = y = 0; y < 256; y++) {
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if (limit <= 0)
                    continue;
                for (u = 0; u < 256; u++) {
                    int len1 = s->len[p][u];
                    if (len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y << 8) + u;
                    if (symbols[i] != 0xffff)
                        i++;
                }
            }
            ff_free_vlc(&s->vlc[3 + p]);
            ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1,
                               bits, 2, 2, symbols, 2, 2, 0);
        }
    } else {
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;

        for (i = 0, g = -16; g < 16; g++) {
            int len0 = s->len[p0][g & 255];
            int limit0 = VLC_BITS - len0;
            if (limit0 < 2)
                continue;
            for (b = -16; b < 16; b++) {
                int len1 = s->len[p1][b & 255];
                int limit1 = limit0 - len1;
                if (limit1 < 1)
                    continue;
                code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
                for (r = -16; r < 16; r++) {
                    int len2 = s->len[2][r & 255];
                    if (len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r & 255];
                    if (s->decorrelate) {
                        map[i][G] = g;
                        map[i][B] = g + b;
                        map[i][R] = g + r;
                    } else {
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        ff_free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}

static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
{
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length * 8);

    for (i = 0; i < 3; i++) {
        if (read_len_table(s->len[i], &gb) < 0)
            return -1;
        if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
            return -1;
        }
        ff_free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
                 s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return (get_bits_count(&gb) + 7) / 8;
}

static int read_old_huffman_tables(HYuvContext *s)
{
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma,
                  classic_shift_luma_table_size * 8);
    if (read_len_table(s->len[0], &gb) < 0)
        return -1;

    init_get_bits(&gb, classic_shift_chroma,
                  classic_shift_chroma_table_size * 8);
    if (read_len_table(s->len[1], &gb) < 0)
        return -1;

    for (i = 0; i < 256; i++) s->bits[0][i] = classic_add_luma  [i];
    for (i = 0; i < 256; i++) s->bits[1][i] = classic_add_chroma[i];

    if (s->bitstream_bpp >= 24) {
        memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256 * sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256 * sizeof(uint8_t));

    for (i = 0; i < 3; i++) {
        ff_free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
                 s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return 0;
}

static av_cold int alloc_temp(HYuvContext *s)
{
    int i;

    if (s->bitstream_bpp < 24) {
        for (i = 0; i < 3; i++) {
            s->temp[i] = av_malloc(s->width + 16);
            if (!s->temp[i])
                return AVERROR(ENOMEM);
        }
    } else {
        s->temp[0] = av_mallocz(4 * s->width + 16);
        if (!s->temp[0])
            return AVERROR(ENOMEM);
    }

    return 0;
}

static av_cold int common_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    s->avctx = avctx;
    s->flags = avctx->flags;

    ff_dsputil_init(&s->dsp, avctx);

    s->width  = avctx->width;
    s->height = avctx->height;
    av_assert1(s->width > 0 && s->height > 0);

    return 0;
}

static av_cold int common_end(HYuvContext *s)
{
    int i;

    for (i = 0; i < 3; i++) {
        av_freep(&s->temp[i]);
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3 * sizeof(VLC));

    avctx->coded_frame = &s->picture;
    avcodec_get_frame_defaults(&s->picture);
    s->interlaced = s->height > 288;

    s->bgr32 = 1;

    if (avctx->extradata_size) {
        if ((avctx->bits_per_coded_sample & 7) &&
            avctx->bits_per_coded_sample != 12)
            s->version = 1;
        else
            s->version = 2;
    } else
        s->version = 0;

    if (s->version == 2) {
        int method, interlace;

        if (avctx->extradata_size < 4)
            return -1;

        method = ((uint8_t*)avctx->extradata)[0];
        s->decorrelate = method & 64 ? 1 : 0;
        s->predictor = method & 63;
        s->bitstream_bpp = ((uint8_t*)avctx->extradata)[1];
        if (s->bitstream_bpp == 0)
            s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
        interlace = (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
        s->context = ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if (read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
                                avctx->extradata_size - 4) < 0)
            return AVERROR_INVALIDDATA;
    } else {
        switch (avctx->bits_per_coded_sample & 7) {
        case 1:
            s->predictor = LEFT;
            s->decorrelate = 0;
            break;
        case 2:
            s->predictor = LEFT;
            s->decorrelate = 1;
            break;
        case 3:
            s->predictor = PLANE;
            s->decorrelate = avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor = MEDIAN;
            s->decorrelate = 0;
            break;
        default:
            s->predictor = LEFT;
            s->decorrelate = 0;
            break;
        }
        s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
        s->context = 0;

        if (read_old_huffman_tables(s) < 0)
            return AVERROR_INVALIDDATA;
    }

    switch (s->bitstream_bpp) {
    case 12:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        break;
    case 16:
        if (s->yuy2) {
            avctx->pix_fmt = AV_PIX_FMT_YUYV422;
        } else {
            avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if (s->bgr32) {
            avctx->pix_fmt = AV_PIX_FMT_RGB32;
        } else {
            avctx->pix_fmt = AV_PIX_FMT_BGR24;
        }
        break;
    default:
        return AVERROR_INVALIDDATA;
    }

    if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUV420P) && avctx->width & 1) {
        av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P && avctx->width % 4) {
        av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 for this colorspace and predictor\n");
        return AVERROR_INVALIDDATA;
    }
    if (alloc_temp(s)) {
        common_end(s);
        return AVERROR(ENOMEM);
    }

    return 0;
}

static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    avctx->coded_frame = &s->picture;
    if (alloc_temp(s)) {
        common_end(s);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < 6; i++)
        s->vlc[i].table = NULL;

    if (s->version == 2) {
        if (read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
                                avctx->extradata_size) < 0)
            return AVERROR_INVALIDDATA;
    } else {
        if (read_old_huffman_tables(s) < 0)
            return AVERROR_INVALIDDATA;
    }

    return 0;
}
#endif

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
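/*
 * Write a code-length table in the run-length format understood by
 * read_len_table(): runs of up to 7 equal lengths are packed into a single
 * byte (count in the top 3 bits), longer runs are stored as a value byte
 * followed by an 8-bit count.  Returns the number of bytes written.
 */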
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
{
    int i;
    int index = 0;

    for (i = 0; i < 256;) {
        int val = len[i];
        int repeat = 0;

        for (; i < 256 && len[i] == val && repeat < 255; i++)
            repeat++;

        av_assert0(val < 32 && val > 0 && repeat < 256 && repeat > 0);
        if (repeat > 7) {
            buf[index++] = val;
            buf[index++] = repeat;
        } else {
            buf[index++] = val | (repeat << 5);
        }
    }

    return index;
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata = av_mallocz(1024*30);
    avctx->stats_out = av_mallocz(1024*30);
    if (!avctx->extradata || !avctx->stats_out) {
        av_freep(&avctx->stats_out);
        return AVERROR(ENOMEM);
    }
    s->version = 2;

    avctx->coded_frame = &s->picture;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
        if (s->width & 1) {
            av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
            return AVERROR(EINVAL);
        }
        s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
        break;
    case AV_PIX_FMT_RGB32:
        s->bitstream_bpp = 32;
        break;
    case AV_PIX_FMT_RGB24:
        s->bitstream_bpp = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return AVERROR(EINVAL);
    }
    avctx->bits_per_coded_sample = s->bitstream_bpp;
    s->decorrelate = s->bitstream_bpp >= 24;
    s->predictor = avctx->prediction_method;
    s->interlaced = avctx->flags & CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if (avctx->context_model == 1) {
        s->context = avctx->context_model;
        if (s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) {
            av_log(avctx, AV_LOG_ERROR,
                   "context=1 is not compatible with "
                   "2 pass huffyuv encoding\n");
            return AVERROR(EINVAL);
        }
    } else
        s->context = 0;

    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
        if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: YV12 is not supported by huffyuv; use "
                   "vcodec=ffvhuff or format=422p\n");
            return AVERROR(EINVAL);
        }
        if (avctx->context_model) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: per-frame huffman tables are not supported "
                   "by huffyuv; use vcodec=ffvhuff\n");
            return AVERROR(EINVAL);
        }
        if (s->interlaced != (s->height > 288))
            av_log(avctx, AV_LOG_INFO,
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
        av_log(avctx, AV_LOG_ERROR,
               "Error: RGB is incompatible with median predictor\n");
        return AVERROR(EINVAL);
    }

    ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
    if (s->context)
        ((uint8_t*)avctx->extradata)[2] |= 0x40;
    ((uint8_t*)avctx->extradata)[3] = 0;
    s->avctx->extradata_size = 4;

    if (avctx->stats_in) {
        char *p = avctx->stats_in;

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] = 1;

        for (;;) {
            for (i = 0; i < 3; i++) {
                char *next;

                for (j = 0; j < 256; j++) {
                    s->stats[i][j] += strtol(p, &next, 0);
                    if (next == p) return -1;
                    p = next;
                }
            }
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
        }
    } else {
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);

                s->stats[i][j] = 100000000 / (d + 1);
            }
    }

    for (i = 0; i < 3; i++) {
        ff_huff_gen_len_table(s->len[i], s->stats[i]);

        if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
            return -1;
        }

        s->avctx->extradata_size +=
            store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if (s->context) {
        for (i = 0; i < 3; i++) {
            int pels = s->width * s->height / (i ? 40 : 10);
            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);
                s->stats[i][j] = pels / (d + 1);
            }
        }
    } else {
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] = 0;
    }

    if (alloc_temp(s)) {
        common_end(s);
        return AVERROR(ENOMEM);
    }

    s->picture_number = 0;

    return 0;
}
#endif

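/*
 * Decode two symbols at once: try the joint table for plane pair (0, plane1)
 * first; the escape value 0xffff means the combination is not in the joint
 * table, in which case the two symbols are decoded separately from their
 * per-plane tables.
 */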
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

static void decode_422_bitstream(HYuvContext *s, int count)
{
    int i;

    count /= 2;

    if (count >= (get_bits_left(&s->gb)) / (31 * 4)) {
        for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
            READ_2PIX(s->temp[0][2 * i    ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
        }
    } else {
        for (i = 0; i < count; i++) {
            READ_2PIX(s->temp[0][2 * i    ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
        }
    }
}

static void decode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    count /= 2;

    if (count >= (get_bits_left(&s->gb)) / (31 * 2)) {
        for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
        }
    } else {
        for (i = 0; i < count; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
        }
    }
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
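/*
 * Entropy-code one row of residuals for 4:2:2 / 4:2:0 content.  Depending on
 * the flags this either only collects symbol statistics (first pass with
 * CODEC_FLAG2_NO_OUTPUT), collects statistics while writing (adaptive
 * context), or just writes the codes.
 */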
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
    int y0 = y[2 * i];\
    int y1 = y[2 * i + 1];\
    int u0 = u[i];\
    int v0 = v[i];

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif

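/*
 * Decode count BGR(A) pixels.  Each pixel is first tried against the joint
 * RGB VLC, whose hits map directly to precomputed bytes in pix_bgr_map; on a
 * miss the three components are decoded from the individual tables, adding
 * the green value back to blue and red when decorrelation is enabled.  The
 * alpha channel, when present, is coded with the third table.
 */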
static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
                                          int decorrelate, int alpha)
{
    int i;
    for (i = 0; i < count; i++) {
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if (code != -1) {
            *(uint32_t*)&s->temp[0][4 * i] = s->pix_bgr_map[code];
        } else if (decorrelate) {
            s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) +
                                    s->temp[0][4 * i + G];
            s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) +
                                    s->temp[0][4 * i + G];
        } else {
            s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if (alpha)
            s->temp[0][4 * i + A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
    }
}

static void decode_bgr_bitstream(HYuvContext *s, int count)
{
    if (s->decorrelate) {
        if (s->bitstream_bpp == 24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    } else {
        if (s->bitstream_bpp == 24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}

static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G];\
    int b = (s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xff;\
    int r = (s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xff;\
    int a = s->temp[0][planes * i + A];
#define STAT3\
    s->stats[0][b]++;\
    s->stats[1][g]++;\
    s->stats[2][r]++;\
    if (planes == 4) s->stats[2][a]++;
#define WRITE3\
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
    if (planes == 4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        for (i = 0; i < count; i++) {
            LOAD3;
            STAT3;
        }
    } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
        for (i = 0; i < count; i++) {
            LOAD3;
            STAT3;
            WRITE3;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
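/*
 * Report the rows finished since the last call to the application through
 * draw_horiz_band(), converting luma row numbers to chroma rows for 4:2:0.
 */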
static void draw_slice(HYuvContext *s, int y)
{
    int h, cy, i;
    int offset[AV_NUM_DATA_POINTERS];

    if (s->avctx->draw_horiz_band == NULL)
        return;

    h = y - s->last_slice_end;
    y -= h;

    if (s->bitstream_bpp == 12) {
        cy = y >> 1;
    } else {
        cy = y;
    }

    offset[0] = s->picture.linesize[0] * y;
    offset[1] = s->picture.linesize[1] * cy;
    offset[2] = s->picture.linesize[2] * cy;
    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
        offset[i] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end = y + h;
}

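/*
 * Decode one frame: the packet is 32-bit byte-swapped into a padded buffer,
 * the Huffman tables are re-read from the frame when adaptive context is
 * enabled, and the image is then reconstructed plane by plane.  The first
 * pixels are stored raw in the bitstream; the rest are decoded as residuals
 * and undone with the left, plane or median predictor, calling draw_slice()
 * as rows complete.
 */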
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width = s->width;
    const int width2 = s->width >> 1;
    const int height = s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p = &s->picture;
    int table_size = 0, ret;

    AVFrame *picture = data;

    av_fast_padded_malloc(&s->bitstream_buffer,
                          &s->bitstream_buffer_size,
                          buf_size);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);

    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer,
                     (const uint32_t*)buf, buf_size / 4);

    if (p->data[0])
        ff_thread_release_buffer(avctx, p);

    p->reference = 0;
    if ((ret = ff_thread_get_buffer(avctx, p)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    if (s->context) {
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if (table_size < 0)
            return AVERROR_INVALIDDATA;
    }

    if ((unsigned)(buf_size - table_size) >= INT_MAX / 8)
        return AVERROR_INVALIDDATA;

    init_get_bits(&s->gb, s->bitstream_buffer + table_size,
                  (buf_size - table_size) * 8);

    fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
    fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
    fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];

    s->last_slice_end = 0;

    if (s->bitstream_bpp < 24) {
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if (s->yuy2) {
            p->data[0][3] = get_bits(&s->gb, 8);
            p->data[0][2] = get_bits(&s->gb, 8);
            p->data[0][1] = get_bits(&s->gb, 8);
            p->data[0][0] = get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR,
                   "YUY2 output is not implemented yet\n");
            return AVERROR_PATCHWELCOME;
        } else {
            leftv = p->data[2][0] = get_bits(&s->gb, 8);
            lefty = p->data[0][1] = get_bits(&s->gb, 8);
            leftu = p->data[1][0] = get_bits(&s->gb, 8);
            p->data[0][0] = get_bits(&s->gb, 8);

            switch (s->predictor) {
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width - 2);
                lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width - 2, lefty);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
                    leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
                }

                for (cy = y = 1; y < s->height; y++, cy++) {
                    uint8_t *ydst, *udst, *vdst;

                    if (s->bitstream_bpp == 12) {
                        decode_gray_bitstream(s, width);

                        ydst = p->data[0] + p->linesize[0] * y;

                        lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                        if (s->predictor == PLANE) {
                            if (y > s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if (y >= s->height) break;
                    }

                    draw_slice(s, y);

                    ydst = p->data[0] + p->linesize[0] * y;
                    udst = p->data[1] + p->linesize[1] * cy;
                    vdst = p->data[2] + p->linesize[2] * cy;

                    decode_422_bitstream(s, width);
                    lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        leftu = s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv = s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if (s->predictor == PLANE) {
                        if (cy > s->interlaced) {
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if (!(s->flags & CODEC_FLAG_GRAY)) {
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                decode_422_bitstream(s, width - 2);
                lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width - 2, lefty);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
                    leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
                }

                cy = y = 1;

                if (s->interlaced) {
                    decode_422_bitstream(s, width);
                    lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                decode_422_bitstream(s, 4);
                lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                lefttopy = p->data[0][3];
                decode_422_bitstream(s, width - 4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride + 4, p->data[0] + 4, s->temp[0], width - 4, &lefty, &lefttopy);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    lefttopu = p->data[1][1];
                    lefttopv = p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride + 2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
                }
                y++; cy++;

                for (; y < height; y++, cy++) {
                    uint8_t *ydst, *udst, *vdst;

                    if (s->bitstream_bpp == 12) {
                        while (2 * cy > y) {
                            decode_gray_bitstream(s, width);
                            ydst = p->data[0] + p->linesize[0] * y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if (y >= height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst = p->data[0] + p->linesize[0] * y;
                    udst = p->data[1] + p->linesize[1] * cy;
                    vdst = p->data[2] + p->linesize[2] * cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    } else {
        int y;
        int leftr, leftg, leftb, lefta;
        const int last_line = (height - 1) * p->linesize[0];

        if (s->bitstream_bpp == 32) {
            lefta = p->data[0][last_line + A] = get_bits(&s->gb, 8);
            leftr = p->data[0][last_line + R] = get_bits(&s->gb, 8);
            leftg = p->data[0][last_line + G] = get_bits(&s->gb, 8);
            leftb = p->data[0][last_line + B] = get_bits(&s->gb, 8);
        } else {
            leftr = p->data[0][last_line + R] = get_bits(&s->gb, 8);
            leftg = p->data[0][last_line + G] = get_bits(&s->gb, 8);
            leftb = p->data[0][last_line + B] = get_bits(&s->gb, 8);
            lefta = p->data[0][last_line + A] = 255;
            skip_bits(&s->gb, 8);
        }

        if (s->bgr32) {
            switch (s->predictor) {
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width - 1);
                s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line + 4, s->temp[0], width - 1, &leftr, &leftg, &leftb, &lefta);

                for (y = s->height - 2; y >= 0; y--) {
                    decode_bgr_bitstream(s, width);

                    s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0] * y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
                    if (s->predictor == PLANE) {
                        if (s->bitstream_bpp != 32) lefta = 0;
                        if ((y & s->interlaced) == 0 &&
                            y < s->height - 1 - s->interlaced) {
                            s->dsp.add_bytes(p->data[0] + p->linesize[0] * y,
                                             p->data[0] + p->linesize[0] * y +
                                             fake_ystride, fake_ystride);
                        }
                    }
                }

                draw_slice(s, height);
                break;
            default:
                av_log(avctx, AV_LOG_ERROR,
                       "prediction type not supported!\n");
            }
        } else {
            av_log(avctx, AV_LOG_ERROR,
                   "BGR24 output is not implemented yet\n");
            return AVERROR_PATCHWELCOME;
        }
    }
    emms_c();

    *picture = *p;
    *got_frame = 1;

    return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
}
#endif

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for (i = 0; i < 6; i++) {
        ff_free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
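/*
 * Encode one frame: for adaptive context the Huffman tables are regenerated
 * from the accumulated statistics and stored at the start of the packet.
 * The first pixels are written raw, every following row is turned into
 * left / plane / median residuals in s->temp[] and entropy-coded, and the
 * finished bitstream is padded and 32-bit byte-swapped into the packet.
 */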
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width = s->width;
    const int width2 = s->width >> 1;
    const int height = s->height;
    const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p = &s->picture;
    int i, j, size = 0, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
        return ret;

    *p = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if (s->context) {
        for (i = 0; i < 3; i++) {
            ff_huff_gen_len_table(s->len[i], s->stats[i]);
            if (generate_bits_table(s->bits[i], s->len[i]) < 0)
                return -1;
            size += store_table(s, s->len[i], &pkt->data[size]);
        }

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width, 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width - 2);

        if (s->predictor == MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width, lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        } else {
            for (cy = y = 1; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width, lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width, lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width, lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width, lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb, lefta;

        put_bits(&s->pb, 8, lefta = data[A]);
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width - 1, 4);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
            }
            encode_bgra_bitstream(s, width, 4);
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr = data[0]);
        put_bits(&s->pb, 8, leftg = data[1]);
        put_bits(&s->pb, 8, leftb = data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1, &leftr, &leftg, &leftb);
        encode_bgra_bitstream(s, width - 1, 3);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgra_bitstream(s, width, 3);
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    if ((s->flags & CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        int j;
        char *p = avctx->stats_out;
        char *end = p + 1024*30;
        for (i = 0; i < 3; i++) {
            for (j = 0; j < 256; j++) {
                snprintf(p, end - p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j] = 0;
            }
            snprintf(p, end - p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
    }

    s->picture_number++;

    pkt->size   = size * 4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif

#if CONFIG_HUFFYUV_DECODER
AVCodec ff_huffyuv_decoder = {
    .name             = "huffyuv",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_HUFFYUV,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                        CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_DECODER
AVCodec ff_ffvhuff_decoder = {
    .name             = "ffvhuff",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_FFVHUFF,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                        CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif

#if CONFIG_HUFFYUV_ENCODER
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif