67 const AVFrame *pict,
int *got_packet)
70 int n_bytes_image, n_bytes_per_row, n_bytes,
i,
n, hsize,
ret;
71 const uint32_t *pal =
NULL;
72 uint32_t palette256[256];
73 int pad_bytes_per_row, pal_entries = 0, compression =
BMP_RGB;
77 #if FF_API_CODED_FRAME 104 pal = (uint32_t *)p->
data[1];
110 if (pal && !pal_entries) pal_entries = 1 << bit_count;
111 n_bytes_per_row = ((int64_t)avctx->
width * (int64_t)bit_count + 7LL) >> 3LL;
112 pad_bytes_per_row = (4 - n_bytes_per_row) & 3;
113 n_bytes_image = avctx->
height * (n_bytes_per_row + pad_bytes_per_row);
117 #define SIZE_BITMAPFILEHEADER 14 118 #define SIZE_BITMAPINFOHEADER 40 120 n_bytes = n_bytes_image + hsize;
124 bytestream_put_byte(&buf,
'B');
125 bytestream_put_byte(&buf,
'M');
126 bytestream_put_le32(&buf, n_bytes);
127 bytestream_put_le16(&buf, 0);
128 bytestream_put_le16(&buf, 0);
129 bytestream_put_le32(&buf, hsize);
131 bytestream_put_le32(&buf, avctx->
width);
132 bytestream_put_le32(&buf, avctx->
height);
133 bytestream_put_le16(&buf, 1);
134 bytestream_put_le16(&buf, bit_count);
135 bytestream_put_le32(&buf, compression);
136 bytestream_put_le32(&buf, n_bytes_image);
137 bytestream_put_le32(&buf, 0);
138 bytestream_put_le32(&buf, 0);
139 bytestream_put_le32(&buf, 0);
140 bytestream_put_le32(&buf, 0);
141 for (i = 0; i < pal_entries; i++)
142 bytestream_put_le32(&buf, pal[i] & 0xFFFFFF);
145 buf = pkt->
data + hsize;
146 for(i = 0; i < avctx->
height; i++) {
147 if (bit_count == 16) {
148 const uint16_t *
src = (
const uint16_t *) ptr;
149 uint16_t *dst = (uint16_t *) buf;
150 for(n = 0; n < avctx->
width; n++)
153 memcpy(buf, ptr, n_bytes_per_row);
155 buf += n_bytes_per_row;
156 memset(buf, 0, pad_bytes_per_row);
157 buf += pad_bytes_per_row;
This structure describes decoded (raw) audio or video data.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define AV_PIX_FMT_RGB444
#define SIZE_BITMAPINFOHEADER
static const uint32_t rgb565_masks[]
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
8 bits with AV_PIX_FMT_RGB32 palette
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
#define SIZE_BITMAPFILEHEADER
#define i(width, name, range_min, range_max)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
simple assert() macros that are a bit more flexible than ISO C assert().
static const uint32_t monoblack_pal[]
const char * name
Name of the codec implementation.
int flags
A combination of AV_PKT_FLAG values.
enum AVPictureType pict_type
Picture type of the frame.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
int width
picture width / height.
packed RGB 8:8:8, 24bpp, BGRBGR...
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
#define AV_LOG_INFO
Standard information.
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
static int bmp_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
main external API structure.
static av_cold int bmp_encode_init(AVCodecContext *avctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb...
#define FF_DISABLE_DEPRECATION_WARNINGS
common internal api header.
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
#define AV_PIX_FMT_RGB555
#define FF_ENABLE_DEPRECATION_WARNINGS
int key_frame
1 -> keyframe, 0-> not
#define AV_PIX_FMT_RGB565
In the filter API, the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Each input and each output declares the list of supported formats. For video that means the pixel format; for audio that means the channel layout and sample format. These lists are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions.
AVPixelFormat
Pixel format.
This structure stores compressed data.
static const uint32_t rgb444_masks[]