        s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0,
                              (const uint16_t *)src1, s->n - 1, w);
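/* A plain-C reference for what the hencdsp.diff_int16 hook above computes
 * (the installed hook may be SIMD-accelerated).  The "mask" argument is
 * s->n - 1, i.e. (1 << bps) - 1, so the residual wraps at the plane's bit
 * depth.  Sketch only; diff_int16_ref is not a real symbol in the codec: */
static void diff_int16_ref(uint16_t *dst, const uint16_t *src1,
                           const uint16_t *src2, unsigned mask, int w)
{
    int i;
    for (i = 0; i < w; i++)
        dst[i] = (src1[i] - src2[i]) & mask;
}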
            for (i = 0; i < w; i++) {
                const int temp = src[i];

            for (i = 0; i < 32; i++) {
                const int temp = src[i];

        const uint16_t *src16 = (const uint16_t *)src;
        uint16_t       *dst16 = (uint16_t *)dst;

            for (i = 0; i < w; i++) {
                const int temp = src16[i];
                dst16[i] = temp - left;

            for (i = 0; i < 16; i++) {
                const int temp = src16[i];
                dst16[i] = temp - left;
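/* Left prediction in one place: every sample is replaced by its difference
 * to the previous sample, and the last sample is handed back so the next
 * call can continue the chain.  The loops above only do a short scalar
 * prologue before the DSP routines take over the rest of the row; this is a
 * minimal sketch of the 8-bit case (left_predict_sketch is not a real symbol): */
static int left_predict_sketch(uint8_t *dst, const uint8_t *src, int w, int left)
{
    int i;
    for (i = 0; i < w; i++) {
        dst[i] = src[i] - left;   /* wraps modulo 256, which is intended */
        left   = src[i];
    }
    return left;                  /* seed for the next row/slice */
}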
                                      int *red, int *green, int *blue,

    for (i = 0; i < FFMIN(w, 4); i++) {
        const int rt = src[i * 4 + R];
        const int gt = src[i * 4 + G];
        const int bt = src[i * 4 + B];
        const int at = src[i * 4 + A];
        dst[i * 4 + R] = rt - r;
        dst[i * 4 + G] = gt - g;
        dst[i * 4 + B] = bt - b;
        dst[i * 4 + A] = at - a;

    *red   = src[(w - 1) * 4 + R];
    *green = src[(w - 1) * 4 + G];
    *blue  = src[(w - 1) * 4 + B];
    *alpha = src[(w - 1) * 4 + A];
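/* sub_left_prediction_bgr32 does per-channel left prediction on packed BGRA:
 * only the first FFMIN(w, 4) pixels are differenced in scalar code (the rest
 * of the row presumably goes through the byte-difference DSP hook with a
 * 4-byte displacement), and the last pixel's channels are returned through
 * red/green/blue/alpha as the prediction seed for the next row. */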
                                      int *red, int *green, int *blue)

    for (i = 0; i < FFMIN(w, 16); i++) {
        const int rt = src[i * 3 + 0];
        const int gt = src[i * 3 + 1];
        const int bt = src[i * 3 + 2];
        dst[i * 3 + 0] = rt - r;
        dst[i * 3 + 1] = gt - g;
        dst[i * 3 + 2] = bt - b;

    *red   = src[(w - 1) * 3 + 0];
    *green = src[(w - 1) * 3 + 1];
    *blue  = src[(w - 1) * 3 + 2];
    for (i = 0; i < n;) {

        for (; i < n && len[i] == val && repeat < 255; i++)

        av_assert0(val < 32 && val > 0 && repeat < 256 && repeat > 0);

            buf[index++] = repeat;

            buf[index++] = val | (repeat << 5);
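/* The length table is run-length coded: code lengths are 1..31 (5 bits), a
 * run of up to 7 equal lengths fits in one byte as val | (repeat << 5), and
 * longer runs are stored as the length byte followed by an explicit repeat
 * byte.  A hedged byte-level sketch of reading it back (the real decoder in
 * huffyuvdec.c does this through its bitstream reader): */
static int read_len_table_sketch(uint8_t *len, const uint8_t *buf, int n)
{
    int i = 0, index = 0;
    while (i < n) {
        int val    = buf[index] & 31;
        int repeat = buf[index++] >> 5;
        if (repeat == 0)               /* long run: explicit repeat byte */
            repeat = buf[index++];
        if (i + repeat > n)
            return -1;
        while (repeat--)
            len[i++] = val;
    }
    return index;                      /* bytes consumed */
}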
    for (i = 0; i < count; i++) {

#define STATS_OUT_SIZE 21*MAX_N*3 + 4
#if FF_API_CODED_FRAME

#if FF_API_PRIVATE_OPT

#if FF_API_PRIVATE_OPT

               "context=1 is not compatible with "
               "2 pass huffyuv encoding\n");

               "Error: YV12 is not supported by huffyuv; use "
               "vcodec=ffvhuff or format=422p\n");

#if FF_API_PRIVATE_OPT

               "Error: per-frame huffman tables are not supported "
               "by huffyuv; use vcodec=ffvhuff\n");

               "Error: ver>2 is not supported "
               "by huffyuv; use vcodec=ffvhuff\n");

               "using huffyuv 2.2.0 or newer interlacing flag\n");

        av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
               "Use vstrict=-2 / -strict -2 to use it anyway.\n");

               "Error: RGB is incompatible with median predictor\n");
    for (i = 0; i < 4; i++)
        for (j = 0; j < s->vlc_n; j++)

        for (i = 0; i < 4; i++) {

            for (j = 0; j < s->vlc_n; j++) {
                s->stats[i][j] += strtol(p, &next, 0);
                if (next == p) return -1;

        if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++) {

                s->stats[i][j] = 100000000 / (d*d + 1);

        for (i = 0; i < 4; i++) {

            for (j = 0; j < s->vlc_n; j++) {

                s->stats[i][j] = pels / (d*d + 1);

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
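/* When no pass-1 statistics are supplied, the tables are seeded with a prior
 * of the form C / (d*d + 1); d (computed just above, not shown in this
 * excerpt) is the symbol's distance from zero in the wrapped residual
 * alphabet, so small prediction errors start out with short codes.  The
 * context-adaptive branch scales the same shape by the pixel count instead
 * of a fixed constant. */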
            int y1 = y[2 * i + 1];\

        for (i = 0; i < count; i++) {

        for (i = 0; i < count; i++) {

        for (i = 0; i < count; i++) {
    int i, count = width/2;

            int y0 = s->temp[0][width-1];

            int y0 = s->temp16[0][width-1] & mask;

            int y0 = s->temp16[0][width-1];

            s->stats[plane][y0]++;

            s->stats[plane][y0>>2]++;

            put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);

            put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
            put_bits(&s->pb, 2, y0&3);

            int y0 = s->temp[0][2 * i];\
            int y1 = s->temp[0][2 * i + 1];

            int y0 = s->temp16[0][2 * i] & mask;\
            int y1 = s->temp16[0][2 * i + 1] & mask;

            int y0 = s->temp16[0][2 * i];\
            int y1 = s->temp16[0][2 * i + 1];

            s->stats[plane][y0]++;\
            s->stats[plane][y1]++;

            s->stats[plane][y0>>2]++;\
            s->stats[plane][y1>>2]++;

            put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
            put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);

            put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
            put_bits(&s->pb, 2, y0&3);\
            put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
            put_bits(&s->pb, 2, y1&3);
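/* As the macro names suggest: the 9-14 bit path masks each sample to the
 * plane's bit depth and Huffman-codes it directly, while the 15/16 bit path
 * keeps the VLC alphabet manageable by coding only the top bits (y >> 2)
 * through the table and emitting the two least significant bits raw via
 * put_bits(..., 2, y & 3). */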
            for (i = 0; i < count; i++) {

            for (i = 0; i < count; i++) {

            for (i = 0; i < count; i++) {

    } else if (s->bps <= 14) {

            for (i = 0; i < count; i++) {

            for (i = 0; i < count; i++) {

            for (i = 0; i < count; i++) {

            for (i = 0; i < count; i++) {

            for (i = 0; i < count; i++) {

            for (i = 0; i < count; i++) {
            int y0 = s->temp[0][2 * i];\
            int y1 = s->temp[0][2 * i + 1];

            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

        for (i = 0; i < count; i++) {

        for (i = 0; i < count; i++) {

        for (i = 0; i < count; i++) {
        4 * planes * count) {

    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G];             \
    int b = (s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
    int r = (s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
    int a = s->temp[0][planes * i + A];

    put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    for (i = 0; i < count; i++) {

    for (i = 0; i < count; i++) {

    for (i = 0; i < count; i++) {
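/* The RGB path decorrelates against green before entropy coding: b and r are
 * stored as (b - g) & 0xFF and (r - g) & 0xFF, so a decoder adds g back
 * modulo 256.  The alpha write above is only meaningful for 4-plane input;
 * the guarding planes == 4 check is not part of this excerpt. */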
                        const AVFrame *pict, int *got_packet)

    const int width2 = s->width >> 1;

    const AVFrame * const p = pict;
    int i, j, size = 0, ret;

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] >>= 1;
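/* In context-adaptive mode the accumulated symbol counts are halved once the
 * per-frame tables have been written, so the statistics decay exponentially
 * and the Huffman tables keep tracking recent frames rather than the whole
 * sequence. */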
        int lefty, leftu, leftv, y, cy;

            int lefttopy, lefttopu, lefttopv;

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];

            for (; y < height; y++, cy++) {

                if (y >= height) break;

        for (cy = y = 1; y < height; y++, cy++) {

            if (y >= height) break;
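/* HuffYUV's median predictor subtracts mid(left, top, left + top - topleft)
 * from each sample.  A hedged scalar sketch of what the sub_median_pred DSP
 * hook does for one row (src_top is the row above, src_cur the current row;
 * mid_pred() is the median-of-three helper from libavcodec/mathops.h): */
static void sub_median_pred_sketch(uint8_t *dst, const uint8_t *src_top,
                                   const uint8_t *src_cur, intptr_t w,
                                   int *left, int *left_top)
{
    int l = *left, lt = *left_top;
    intptr_t i;
    for (i = 0; i < w; i++) {
        const int pred = mid_pred(l, src_top[i], (l + src_top[i] - lt) & 0xFF);
        lt     = src_top[i];
        l      = src_cur[i];
        dst[i] = l - pred;
    }
    *left     = l;
    *left_top = lt;
}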
        const int fake_stride = -fake_ystride;

        int leftr, leftg, leftb, lefta;

                                            &leftr, &leftg, &leftb, &lefta);

        for (y = 1; y < s->height; y++) {

                                                &leftr, &leftg, &leftb, &lefta);

                                                &leftr, &leftg, &leftb, &lefta);

        const int fake_stride = -fake_ystride;

        int leftr, leftg, leftb;

                                            &leftr, &leftg, &leftb);

        for (y = 1; y < s->height; y++) {

                                                &leftr, &leftg, &leftb);

                                                &leftr, &leftg, &leftb);

        for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {

            int fake_stride = fake_ystride;

            if (s->chroma && (plane == 1 || plane == 2)) {

                fake_stride = plane == 1 ? fake_ustride : fake_vstride;

            for (y = 1; y < h; y++) {

        for (i = 0; i < 4; i++) {
            for (j = 0; j < s->vlc_n; j++) {

    pkt->size   = size * 4;
#define OFFSET(x) offsetof(HYuvContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

#define COMMON_OPTIONS \
    { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism", \
      OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 1 }, \

    { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, "pred" }, \
        { "left",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },   INT_MIN, INT_MAX, VE, "pred" }, \
        { "plane",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE },  INT_MIN, INT_MAX, VE, "pred" }, \
        { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, "pred" }, \
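/* These private options go through the generic AVOption machinery, so they
 * can be set from the command line or via av_opt_set().  Illustrative only,
 * using the option names declared above:
 *
 *     ffmpeg -i in.mov -c:v ffvhuff -pred median out.nut
 *     av_opt_set(avctx->priv_data, "pred", "median", 0);
 */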
    .priv_class     = &normal_class,

#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {

    .priv_class     = &ff_class,