#include "config_components.h"

/* ... */

static int pnm_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *p, int *got_packet)
{
    uint8_t *bytestream, *bytestream_start, *bytestream_end;
    int i, h, h1, c, n, linesize, ret;
    /* ... */
    /* n is the number of bytes in one output row for the chosen pixel format */
    n = (avctx->width + 7) >> 3;   /* 1 bit per pixel, rounded up to whole bytes */
    /* ... */
    n = avctx->width * 4;
    /* ... */
    n = avctx->width * 2;
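Here n is the number of payload bytes in one output row; which expression is used depends on the pixel format selected above. As a quick check of the arithmetic with a made-up width of 100 pixels: (100 + 7) >> 3 = 13 bytes for 1-bit-per-pixel rows (rounded up to a whole byte), 100 * 4 = 400 bytes for 4-byte samples, and 100 * 2 = 200 bytes for 2-byte samples.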
    /* ... */
    snprintf(bytestream, bytestream_end - bytestream,
             "P%c\n%d %d\n", c, avctx->width, h1);
    bytestream += strlen(bytestream);
    /* ... */
    snprintf(bytestream, bytestream_end - bytestream, /* ... */);
    bytestream += strlen(bytestream);
    /* ... */
    snprintf(bytestream, bytestream_end - bytestream, /* ... */);
    bytestream += strlen(bytestream);
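The header written here is plain ASCII: a magic number ("P4", "P5" and "P6" for binary PBM, PGM and PPM; "PF"/"Pf" for PFM), the image dimensions, and, for most variants, a maxval line (PFM instead stores a scale value whose sign encodes endianness); the later snprintf() calls above append those extra header lines. Because snprintf() NUL-terminates its output, advancing by strlen(bytestream) leaves the write pointer just past the header text. A standalone sketch of the idea, with made-up 640x480 8-bit values:

#include <stdio.h>

int main(void)
{
    /* Made-up example: header of an 8-bit 640x480 PGM ("P5") image. */
    char header[32];
    snprintf(header, sizeof(header), "P%c\n%d %d\n%d\n", '5', 640, 480, 255);
    fputs(header, stdout);   /* prints "P5", "640 480" and "255" on three lines */
    return 0;
}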
    for (int i = 0; i < avctx->height; i++) {
        for (int j = 0; j < avctx->width; j++) {
            /* ... */
        }
    }
    /* ... */
    for (int i = 0; i < avctx->height; i++) {
        for (int j = 0; j < avctx->width; j++) {
            /* ... */
        }
    }
    /* ... */
    const float *r = (const float *)p->data[2];
    const float *g = (const float *)p->data[0];
    const float *b = (const float *)p->data[1];
    /* ... */
    for (int j = 0; j < avctx->width; j++) {
        /* ... */
    }
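Note the plane order: in the planar GBR float formats (AV_PIX_FMT_GBRPF32*) plane 0 carries G, plane 1 carries B and plane 2 carries R, which is why r is taken from p->data[2] and b from p->data[1] above. A standalone sketch, not from pnmenc.c, of fetching one pixel in R,G,B order from such a frame:

#include <libavutil/frame.h>

/* Sketch only: read pixel (x, y) of a planar GBRPF32 AVFrame in R,G,B
 * order, assuming the data is in native endianness. */
static void get_rgb_pixel(const AVFrame *frame, int x, int y,
                          float *r, float *g, float *b)
{
    *g = ((const float *)(frame->data[0] + y * frame->linesize[0]))[x];
    *b = ((const float *)(frame->data[1] + y * frame->linesize[1]))[x];
    *r = ((const float *)(frame->data[2] + y * frame->linesize[2]))[x];
}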
    const float *g = (const float *)p->data[0];
    /* ... */
    for (int j = 0; j < avctx->width; j++) {
        /* ... */
    }
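For PHM output, each float sample is converted to an IEEE-754 binary16 (half) value before being written. Going by the helpers referenced on this page (ff_init_float2half_tables(), float2half() and av_float2int(), assumed available via the corresponding libavutil headers), the per-sample conversion looks roughly like the sketch below; treat it as illustrative rather than a verbatim copy of the encoder:

/* Illustrative sketch using the FFmpeg-internal helpers referenced above. */
static uint16_t sample_to_half(float v)
{
    Float2HalfTables t;
    ff_init_float2half_tables(&t);   /* the encoder would build these once at init */
    return float2half(av_float2int(v), &t);
}
/* sample_to_half(1.0f) == 0x3C00, the binary16 encoding of 1.0 */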
    const uint8_t *ptr = p->data[0];
    /* ... */
    for (i = 0; i < h; i++) {
        memcpy(bytestream, ptr, n);
        /* ... */
    }
    /* ... */
    const uint8_t *ptr1 = p->data[1], *ptr2 = p->data[2];
    /* ... */
    for (i = 0; i < h; i++) {
        memcpy(bytestream, ptr1, n);
        /* ... */
        memcpy(bytestream, ptr2, n);
        /* ... */
    }
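These copy loops follow the usual pattern for serializing an AVFrame plane: each row contributes n payload bytes to the output, while consecutive rows of the source plane are linesize bytes apart (linesize may include padding and can exceed n). A generic standalone sketch of that pattern, with illustrative names not taken from pnmenc.c:

#include <stdint.h>
#include <string.h>

/* Generic sketch: pack `rows` rows of `row_bytes` payload bytes each into
 * a contiguous destination, skipping any per-row padding in the source. */
static uint8_t *copy_plane(uint8_t *dst, const uint8_t *src,
                           int src_linesize, int row_bytes, int rows)
{
    for (int i = 0; i < rows; i++) {
        memcpy(dst, src, row_bytes);
        dst += row_bytes;
        src += src_linesize;
    }
    return dst;   /* first byte past the packed plane */
}

For the PGMYUV variants the two chroma planes of YUV420P are half the luma resolution in each direction, which is why a second, similar loop handles p->data[1] and p->data[2] separately.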
#if CONFIG_PGM_ENCODER
/* ... */
#if CONFIG_PGMYUV_ENCODER
/* ... */
#if CONFIG_PPM_ENCODER
/* ... */
#if CONFIG_PBM_ENCODER
/* ... */
#if CONFIG_PFM_ENCODER
/* ... */
#if CONFIG_PHM_ENCODER
    /* ... */
    .init           = phm_enc_init,
    /* ... */
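Each PNM flavour is compiled in conditionally (CONFIG_*_ENCODER) and exported as its own FFCodec table, all of them routing FF_CODEC_ENCODE_CB() to the shared pnm_encode_frame(); only the PHM encoder additionally installs an init callback (phm_enc_init), presumably to set up the float-to-half tables. Roughly, one of these tables looks like the sketch below; the field values are indicative of the PGM case and may not match the file exactly:

/* Indicative sketch of an FFCodec table for the PGM case; the exact
 * fields and values in pnmenc.c may differ. */
const FFCodec ff_pgm_encoder = {
    .p.name         = "pgm",
    CODEC_LONG_NAME("PGM (Portable GrayMap) image"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_PGM,
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
    FF_CODEC_ENCODE_CB(pnm_encode_frame),
    .p.pix_fmts     = (const enum AVPixelFormat[]){
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_NONE
    },
};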
Referenced symbols:

const FFCodec ff_pbm_encoder
AVPixelFormat
Pixel format.
void ff_init_float2half_tables(Float2HalfTables *t)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
AVFrame
This structure describes decoded (raw) audio or video data.
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
int depth
Number of bits in the component.
@ AV_PIX_FMT_MONOWHITE
Y, 1bpp, 0 is white, 1 is black; in each byte pixels are ordered from the msb to the lsb.
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
@ AV_PIX_FMT_GRAYF32LE
IEEE-754 single precision Y, 32bpp, little-endian.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
@ AV_PIX_FMT_GRAY16BE
Y, 16bpp, big-endian.
AVCodec p
The public AVCodec.
void av_shrink_packet(AVPacket *pkt, int size)
Reduce packet size, correctly zeroing padding.
#define FF_CODEC_ENCODE_CB(func)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int (*init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding output packets.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define AV_PIX_FMT_GRAYF32
#define CODEC_LONG_NAME(str)
const FFCodec ff_pfm_encoder
const FFCodec ff_phm_encoder
@ AV_PIX_FMT_GRAY8
Y, 8bpp.
const FFCodec ff_ppm_encoder
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define AV_PIX_FMT_GBRPF32
const FFCodec ff_pgmyuv_encoder
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian.
#define i(width, name, range_min, range_max)
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
const FFCodec ff_pgm_encoder
Float2HalfTables f2h_tables
@ AV_PIX_FMT_GRAYF32BE
IEEE-754 single precision Y, 32bpp, big-endian.
static uint16_t float2half(uint32_t f, const Float2HalfTables *t)
AVCodecContext
main external API structure.
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
@ AV_PIX_FMT_YUV420P16BE
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
AVPacket
This structure stores compressed data.
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, typically indicating the size in bytes of each picture line.
static int pnm_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *p, int *got_packet)