Go to the documentation of this file.
46 #include <theora/theoraenc.h>
81 if (packet->bytes < 0) {
82 message =
"ogg_packet has negative size";
83 }
else if (packet->bytes > 0xffff) {
84 message =
"ogg_packet is larger than 65535 bytes";
85 }
else if (newsize < avc_context->extradata_size) {
86 message =
"extradata_size would overflow";
101 memcpy(avc_context->
extradata + (*
offset), packet->packet, packet->bytes);
102 (*offset) += packet->bytes;
108 #ifdef TH_ENCCTL_2PASS_OUT
113 bytes = th_encode_ctl(
h->t_state, TH_ENCCTL_2PASS_OUT, &buf,
sizeof(buf));
120 h->stats_offset + bytes);
124 memcpy(
h->stats +
h->stats_offset, buf, bytes);
125 h->stats_offset += bytes;
129 memcpy(
h->stats, buf, bytes);
146 #ifdef TH_ENCCTL_2PASS_IN
154 h->stats_size = strlen(avctx->
stats_in) * 3/4;
162 while (
h->stats_size -
h->stats_offset > 0) {
163 bytes = th_encode_ctl(
h->t_state, TH_ENCCTL_2PASS_IN,
164 h->stats +
h->stats_offset,
165 h->stats_size -
h->stats_offset);
172 h->stats_offset += bytes;
184 th_comment t_comment;
192 th_info_init(&t_info);
195 t_info.pic_width = avc_context->
width;
196 t_info.pic_height = avc_context->
height;
207 t_info.aspect_numerator = 1;
208 t_info.aspect_denominator = 1;
212 t_info.colorspace = TH_CS_ITU_REC_470M;
214 t_info.colorspace = TH_CS_ITU_REC_470BG;
216 t_info.colorspace = TH_CS_UNSPECIFIED;
219 t_info.pixel_fmt = TH_PF_420;
221 t_info.pixel_fmt = TH_PF_422;
223 t_info.pixel_fmt = TH_PF_444;
239 t_info.target_bitrate = 0;
241 t_info.target_bitrate = avc_context->
bit_rate;
246 h->t_state = th_encode_alloc(&t_info);
254 th_info_clear(&t_info);
256 if (th_encode_ctl(
h->t_state, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
263 if (
h->speed_level != -1) {
265 int speed_level =
h->speed_level;
266 th_encode_ctl(
h->t_state, TH_ENCCTL_GET_SPLEVEL_MAX, &max_speed_level,
sizeof(max_speed_level));
267 speed_level =
FFMIN(speed_level, max_speed_level);
268 th_encode_ctl(
h->t_state, TH_ENCCTL_SET_SPLEVEL, &speed_level,
sizeof(speed_level));
290 th_comment_init(&t_comment);
292 while (th_encode_flushheader(
h->t_state, &t_comment, &o_packet))
296 th_comment_clear(&t_comment);
304 th_ycbcr_buffer t_yuv_buffer;
311 th_encode_packetout(
h->t_state, 1, &o_packet);
319 for (
i = 0;
i < 3;
i++) {
320 t_yuv_buffer[
i].width =
FFALIGN(avc_context->
width, 16) >> (
i &&
h->uv_hshift);
321 t_yuv_buffer[
i].height =
FFALIGN(avc_context->
height, 16) >> (
i &&
h->uv_vshift);
322 t_yuv_buffer[
i].stride =
frame->linesize[
i];
323 t_yuv_buffer[
i].data =
frame->data[
i];
331 result = th_encode_ycbcr_in(
h->t_state, t_yuv_buffer);
336 message =
"differing frame sizes";
339 message =
"encoder is not ready or is finished";
354 result = th_encode_packetout(
h->t_state, 0, &o_packet);
370 memcpy(
pkt->
data, o_packet.packet, o_packet.bytes);
381 if (!(o_packet.granulepos &
h->keyframe_mask))
392 th_encode_free(
h->t_state);
402 .
p.
name =
"libtheora",
418 .p.wrapper_name =
"libtheora",
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
#define CODEC_PIXFMTS(...)
static av_cold int encode_close(AVCodecContext *avc_context)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
static const AVOption options[]
This structure describes decoded (raw) audio or video data.
#define FF_CODEC_CAP_NOT_INIT_THREADSAFE
The codec is not known to be init-threadsafe (i.e.
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
static av_cold void close(AVCodecParserContext *s)
AVCodec p
The public AVCodec.
int flags
AV_CODEC_FLAG_*.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
#define FF_CODEC_ENCODE_CB(func)
static int ogg_packet(AVFormatContext *s, int *sid, int *dstart, int *dsize, int64_t *fpos)
find the next Ogg packet
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
int global_quality
Global quality for codecs which cannot change it per frame.
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define CODEC_LONG_NAME(str)
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
and forward the result (frame or status change) to the corresponding input. If nothing is possible
int64_t bit_rate
the average bitrate
const char * av_default_item_name(void *ptr)
Return the context name.
AVClass * av_class
class for AVOptions
static int encode_frame(AVCodecContext *avc_context, AVPacket *pkt, const AVFrame *frame, int *got_packet)
int av_base64_decode(uint8_t *out, const char *in_str, int out_size)
Decode a base64-encoded string.
#define AV_OPT_FLAG_ENCODING_PARAM
A generic parameter which can be set by the user for muxing or encoding.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
char * stats_out
pass1 encoding statistics output buffer
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
int av_reallocp(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory through a pointer to a pointer.
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
#define AVERROR_EXTERNAL
Generic error in an external library.
it's the only field you need to keep assuming you have a context There is some magic you don't need to care about around this just let it vf offset
int flags
A combination of AV_PKT_FLAG values.
#define AV_BASE64_SIZE(x)
Calculate the output size needed to base64-encode x bytes to a null-terminated string.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
const FFCodec ff_libtheora_encoder
AVCodec struct exposed to libavcodec.
#define AV_OPT_FLAG_VIDEO_PARAM
const char * name
Name of the codec implementation.
static int concatenate_packet(unsigned int *offset, AVCodecContext *avc_context, const ogg_packet *packet)
Concatenate an ogg_packet into the extradata.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter's request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
static av_cold int encode_init(AVCodecContext *avc_context)
main external API structure.
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
@ AV_OPT_TYPE_INT
Underlying C type is int.
static int get_stats(AVCodecContext *avctx, int eos)
char * av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size)
Encode data to base64 and null-terminate.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
static int submit_stats(AVCodecContext *avctx)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static int FUNC() message(CodedBitstreamContext *ctx, RWContext *rw, SEIRawMessage *current)
This structure stores compressed data.
static const AVClass theora_class
int width
picture width / height.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.