Go to the documentation of this file.
43 #include <theora/theoraenc.h>
64 if (packet->bytes < 0) {
65 message =
"ogg_packet has negative size";
66 }
else if (packet->bytes > 0xffff) {
67 message =
"ogg_packet is larger than 65535 bytes";
68 }
else if (newsize < avc_context->extradata_size) {
69 message =
"extradata_size would overflow";
84 memcpy(avc_context->
extradata + (*
offset), packet->packet, packet->bytes);
85 (*offset) += packet->bytes;
91 #ifdef TH_ENCCTL_2PASS_OUT
96 bytes = th_encode_ctl(
h->t_state, TH_ENCCTL_2PASS_OUT, &
buf,
sizeof(
buf));
103 h->stats_offset + bytes);
107 memcpy(
h->stats +
h->stats_offset,
buf, bytes);
108 h->stats_offset += bytes;
112 memcpy(
h->stats,
buf, bytes);
129 #ifdef TH_ENCCTL_2PASS_IN
137 h->stats_size = strlen(avctx->
stats_in) * 3/4;
145 while (
h->stats_size -
h->stats_offset > 0) {
146 bytes = th_encode_ctl(
h->t_state, TH_ENCCTL_2PASS_IN,
147 h->stats +
h->stats_offset,
148 h->stats_size -
h->stats_offset);
155 h->stats_offset += bytes;
167 th_comment t_comment;
175 th_info_init(&t_info);
178 t_info.pic_width = avc_context->
width;
179 t_info.pic_height = avc_context->
height;
190 t_info.aspect_numerator = 1;
191 t_info.aspect_denominator = 1;
195 t_info.colorspace = TH_CS_ITU_REC_470M;
197 t_info.colorspace = TH_CS_ITU_REC_470BG;
199 t_info.colorspace = TH_CS_UNSPECIFIED;
202 t_info.pixel_fmt = TH_PF_420;
204 t_info.pixel_fmt = TH_PF_422;
206 t_info.pixel_fmt = TH_PF_444;
222 t_info.target_bitrate = 0;
224 t_info.target_bitrate = avc_context->
bit_rate;
229 h->t_state = th_encode_alloc(&t_info);
235 h->keyframe_mask = (1 << t_info.keyframe_granule_shift) - 1;
237 th_info_clear(&t_info);
239 if (th_encode_ctl(
h->t_state, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
264 th_comment_init(&t_comment);
266 while (th_encode_flushheader(
h->t_state, &t_comment, &o_packet))
270 th_comment_clear(&t_comment);
278 th_ycbcr_buffer t_yuv_buffer;
285 th_encode_packetout(
h->t_state, 1, &o_packet);
293 for (
i = 0;
i < 3;
i++) {
294 t_yuv_buffer[
i].width =
FFALIGN(avc_context->
width, 16) >> (
i &&
h->uv_hshift);
295 t_yuv_buffer[
i].height =
FFALIGN(avc_context->
height, 16) >> (
i &&
h->uv_vshift);
296 t_yuv_buffer[
i].stride =
frame->linesize[
i];
297 t_yuv_buffer[
i].data =
frame->data[
i];
305 result = th_encode_ycbcr_in(
h->t_state, t_yuv_buffer);
310 message =
"differing frame sizes";
313 message =
"encoder is not ready or is finished";
328 result = th_encode_packetout(
h->t_state, 0, &o_packet);
344 memcpy(
pkt->
data, o_packet.packet, o_packet.bytes);
349 #if FF_API_CODED_FRAME
354 if (!(o_packet.granulepos &
h->keyframe_mask))
365 th_encode_free(
h->t_state);
388 .wrapper_name =
"libtheora",
#define FF_ENABLE_DEPRECATION_WARNINGS
static av_cold int encode_close(AVCodecContext *avc_context)
AVPixelFormat
Pixel format.
static av_cold int init(AVCodecContext *avctx)
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats — for video that means pixel format, for audio that means channel layout and sample format. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
This structure describes decoded (raw) audio or video data.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
AVCodec ff_libtheora_encoder
AVCodec struct exposed to libavcodec.
int key_frame
1 -> keyframe, 0 -> not a keyframe
int flags
AV_CODEC_FLAG_*.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
static int ogg_packet(AVFormatContext *s, int *sid, int *dstart, int *dsize, int64_t *fpos)
find the next Ogg packet
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
char * stats_in
pass2 encoding statistics input buffer. Concatenated stuff from stats_out of pass1 should be placed he...
int global_quality
Global quality for codecs which cannot change it per frame.
static enum AVPixelFormat pix_fmts[]
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
and forward the result (frame or status change) to the corresponding input. If nothing is possible,
int64_t bit_rate
the average bitrate
static int encode_frame(AVCodecContext *avc_context, AVPacket *pkt, const AVFrame *frame, int *got_packet)
int av_base64_decode(uint8_t *out, const char *in_str, int out_size)
Decode a base64-encoded string.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
char * stats_out
pass1 encoding statistics output buffer
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
int av_reallocp(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory through a pointer to a pointer.
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
#define AVERROR_EXTERNAL
Generic error in an external library.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field — just leave it as is.
int flags
A combination of AV_PKT_FLAG values.
#define AV_BASE64_SIZE(x)
Calculate the output size needed to base64-encode x bytes to a null-terminated string.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
const char * name
Name of the codec implementation.
static int concatenate_packet(unsigned int *offset, AVCodecContext *avc_context, const ogg_packet *packet)
Concatenate an ogg_packet into the extradata.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more — it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if the filter should request a frame on one of its inputs, it does so repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
static av_cold int encode_init(AVCodecContext *avc_context)
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
main external API structure.
static int get_stats(AVCodecContext *avctx, int eos)
char * av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size)
Encode data to base64 and null-terminate.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
static int submit_stats(AVCodecContext *avctx)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
This structure stores compressed data.
int width
picture width / height.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.