Source listing excerpt: FFmpeg's RTP H.264 depacketizer (libavformat/rtpdec_h264.c). Elided source lines are marked with /* ... */.
/* PayloadContext: per-NAL-type packet counters, compiled in only for debugging: */
    int packet_types_received[32];

#ifdef DEBUG
#define COUNT_NAL_TYPE(data, nal) data->packet_types_received[(nal) & 0x1f]++
#define NAL_COUNTERS data->packet_types_received
#else
#define COUNT_NAL_TYPE(data, nal) do { } while (0)
#define NAL_COUNTERS NULL
#endif
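
The & 0x1f mask works because the NAL unit type occupies the low five bits of the first byte of a NAL unit (the NAL header: 1 forbidden_zero_bit, 2 nal_ref_idc bits, 5 nal_unit_type bits). A minimal illustration, using a hypothetical helper that is not part of this file:

    #include <stdint.h>

    /* hypothetical helper, for illustration only */
    static inline int nal_unit_type(uint8_t nal_header_byte)
    {
        return nal_header_byte & 0x1f;   /* low 5 bits = nal_unit_type */
    }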
89 "RTP Profile IDC: %x Profile IOP: %x Level: %x\n",
/* ff_h264_parse_sprop_parameter_sets(): decode the comma-separated,
 * base64-encoded parameter sets from the SDP and append them to the
 * stream extradata. */
int ff_h264_parse_sprop_parameter_sets(AVFormatContext *s,
                                       uint8_t **data_ptr, int *size_ptr,
                                       const char *value)
{
    char base64packet[1024];
    uint8_t decoded_packet[1024];
    /* ... for each comma-separated chunk of the attribute value: */
        char *dst = base64packet;
        while (*value && *value != ','
               && (dst - base64packet) < sizeof(base64packet) - 1) {
            *dst++ = *value++;  /* copy the chunk into base64packet */
        }
        packet_size = av_base64_decode(decoded_packet, base64packet,
                                       sizeof(decoded_packet));
        if (packet_size > 0) {
            /* the extradata buffer is grown with av_realloc(); on failure: */
                av_log(s, AV_LOG_ERROR,
                       "Unable to allocate memory for extradata!\n");
            /* ... otherwise a start code and the decoded packet are appended
             * to the reallocated buffer (dest): */
            memcpy(dest + *size_ptr + sizeof(start_sequence),
                   decoded_packet, packet_size);
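
The resulting extradata is in Annex B layout: each decoded parameter set is preceded by the bytes of start_sequence (the usual 00 00 00 01 start code), with AV_INPUT_BUFFER_PADDING_SIZE bytes reserved at the end. Sketch, assuming one SPS and one PPS were signalled:

    00 00 00 01 <SPS ...> 00 00 00 01 <PPS ...> <padding>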

/* sdp_parse_fmtp_config_h264(): handle the individual a=fmtp attributes. */
static int sdp_parse_fmtp_config_h264(AVFormatContext *s, AVStream *stream,
                                      PayloadContext *h264_data,
                                      const char *attr, const char *value)
{
    if (!strcmp(attr, "packetization-mode")) {
        /* ... store h264_data->packetization_mode; mode 0 = single NAL unit,
         * 1 = non-interleaved (STAP-A, FU-A), 2 = interleaved (unsupported): */
            av_log(s, AV_LOG_ERROR,
                   "Interleaved RTP mode is not supported yet.\n");
    } else if (!strcmp(attr, "profile-level-id")) {
        if (strlen(value) == 6)
            parse_profile_level_id(s, h264_data, value);
    } else if (!strcmp(attr, "sprop-parameter-sets")) {
        /* ... ff_h264_parse_sprop_parameter_sets() fills the extradata */

/* ff_h264_parse_framesize(): parse the "framesize" SDP attribute
 * ("<payload type> <width>-<height>") into the codec parameters. */
void ff_h264_parse_framesize(AVCodecParameters *par, const char *p)
{
    char buf1[50];
    char *dst = buf1;

    while (*p && *p == ' ')
        p++;                    /* strip leading spaces */
    while (*p && *p != ' ')
        p++;                    /* skip the payload type */
    while (*p && *p == ' ')
        p++;                    /* strip spaces before the size */
    while (*p && *p != '-' && (dst - buf1) < sizeof(buf1) - 1)
        *dst++ = *p++;          /* copy the width into buf1 */
    *dst = '\0';

    par->width  = atoi(buf1);
    par->height = atoi(p + 1);  /* skip the '-' */
}
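
For illustration (payload type and dimensions arbitrary), the attribute this helper parses looks like:

    a=framesize:96 320-240

The loops above skip the leading payload type and surrounding spaces, copy the width up to the '-', and read the height from just after it.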
/* ff_h264_handle_aggregated_packet(): unpack an aggregation packet (e.g.
 * STAP-A) into one AVPacket, prefixing every contained NAL unit with a start
 * code.  Two passes are made: the first only sums the sizes, the second
 * copies the data into the packet allocated in between. */
int ff_h264_handle_aggregated_packet(AVFormatContext *ctx, PayloadContext *data,
                                     AVPacket *pkt,
                                     const uint8_t *buf, int len,
                                     int skip_between, int *nal_counters,
                                     int nal_mask)
{
    int total_length = 0;
    /* ... in each pass: */
        const uint8_t *src = buf;
        int src_len        = len;
        while (src_len > 2) {
            uint16_t nal_size = AV_RB16(src);   /* 16-bit size prefix */
            /* ... */
            if (nal_size <= src_len) {
                /* pass 0 counts; pass 1 writes a start code, then the unit */
                memcpy(dst, src, nal_size);
                if (nal_counters)
                    nal_counters[(*src) & nal_mask]++;
            } else {
                av_log(ctx, AV_LOG_ERROR,
                       "nal size exceeds length: %d %d\n", nal_size, src_len);
                return AVERROR_INVALIDDATA;
            }
            /* eat what was handled */
            src     += nal_size + skip_between;
            src_len -= nal_size + skip_between;
        }
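
For context, the loop above walks the (size, NAL unit) pairs of an aggregation payload as defined in RFC 6184 (summarized here, not restated in this file): each aggregated unit is preceded by a 16-bit big-endian size, which is what the AV_RB16() read corresponds to; skip_between lets callers skip any extra bytes between units (it is 0 for plain STAP-A).

    +------------+---------+--------------+---------+--------------+---
    | STAP-A hdr | size #1 | NAL unit #1  | size #2 | NAL unit #2  | ...
    |  1 byte    | 2 bytes |              | 2 bytes |              |
    +------------+---------+--------------+---------+--------------+---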

/* ff_h264_handle_frag_packet(): copy one fragment into pkt; for the first
 * fragment (start_bit set) a start code and the reconstructed NAL header
 * are written in front of the payload. */
int ff_h264_handle_frag_packet(AVPacket *pkt, const uint8_t *buf, int len,
                               int start_bit, const uint8_t *nal_header,
                               int nal_header_len)
{
    /* ... if start_bit is set, after copying start_sequence: */
        memcpy(pkt->data + pos, nal_header, nal_header_len);
        pos += nal_header_len;
    /* ... then the fragment payload itself is appended */

/* h264_handle_packet_fu_a(): depacketize a fragmentation unit (FU-A); the
 * original NAL header byte is rebuilt from the FU indicator and FU header. */
static int h264_handle_packet_fu_a(AVFormatContext *ctx, PayloadContext *data,
                                   AVPacket *pkt,
                                   const uint8_t *buf, int len,
                                   int *nal_counters, int nal_mask)
{
    uint8_t fu_indicator, fu_header, start_bit, nal_type, nal;
    /* ... */
    fu_indicator = buf[0];
    fu_header    = buf[1];
    start_bit    = fu_header >> 7;
    nal_type     = fu_header & 0x1f;
    nal          = (fu_indicator & 0xe0) | nal_type;

    /* skip the fu_indicator and fu_header */
    buf += 2;
    len -= 2;

    if (start_bit && nal_counters)
        nal_counters[nal_type & nal_mask]++;
    return ff_h264_handle_frag_packet(pkt, buf, len, start_bit, &nal, 1);
}
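
The bit operations above follow the FU-A layout from RFC 6184 (summarized here for reference):

    FU indicator (buf[0]):   F(1) | NRI(2) | type(5) = 28 for FU-A
    FU header    (buf[1]):   S(1) | E(1)  | R(1)   | original NAL type(5)

The original NAL header byte is rebuilt as (fu_indicator & 0xe0) | nal_type, i.e. the F/NRI bits of the indicator combined with the real type from the FU header; S is the start bit tested above.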

/* h264_handle_packet(): main depacketizer callback; dispatches on the NAL
 * unit type of the RTP payload (single NAL unit, aggregated, or FU-A). */
static int h264_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                              AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                              const uint8_t *buf, int len, uint16_t seq,
                              int flags)
    /* ... */

/* h264_close_context(): in debugging builds, report how many packets of
 * each NAL unit type were seen. */
    for (ii = 0; ii < 32; ii++) {
        if (data->packet_types_received[ii])
            av_log(NULL, AV_LOG_DEBUG, "Received %d packets of type %d\n",
                   data->packet_types_received[ii], ii);
    }

/* parse_h264_sdp_line(): per-stream SDP attribute handler registered in
 * ff_h264_dynamic_handler. */
    AVStream *stream;
    const char *p = line;
    /* ... */
    stream = s->streams[st_index];
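
The attribute dispatch itself falls in an elided part of the function; a sketch of how the referenced helpers plausibly fit together, based on the declarations listed below rather than on the elided lines (av_strstart() advances p past a matched prefix):

    if (av_strstart(p, "framesize:", &p))
        ff_h264_parse_framesize(stream->codecpar, p);
    else if (av_strstart(p, "fmtp:", &p))
        return ff_parse_fmtp(s, stream, h264_data, p,
                             sdp_parse_fmtp_config_h264);
    return 0;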

Referenced symbols (Doxygen cross-references):

#define AV_LOG_WARNING
Something somehow does not look correct.
uint8_t * extradata
Extra binary data needed for initializing the decoder, codec-dependent.
AVCodecParameters
This struct describes the properties of an encoded stream.
int ff_parse_fmtp(AVFormatContext *s, AVStream *stream, PayloadContext *data, const char *p, int(*parse_fmtp)(AVFormatContext *s, AVStream *stream, PayloadContext *data, const char *attr, const char *value))
void ff_h264_parse_framesize(AVCodecParameters *par, const char *p)
const RTPDynamicProtocolHandler ff_h264_dynamic_handler
static void parse_profile_level_id(AVFormatContext *s, PayloadContext *h264_data, const char *value)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define COUNT_NAL_TYPE(data, nal)
AVCodecParameters * codecpar
Codec parameters associated with this stream.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static int h264_handle_packet(AVFormatContext *ctx, PayloadContext *data, AVStream *st, AVPacket *pkt, uint32_t *timestamp, const uint8_t *buf, int len, uint16_t seq, int flags)
int ff_h264_parse_sprop_parameter_sets(AVFormatContext *s, uint8_t **data_ptr, int *size_ptr, const char *value)
int av_base64_decode(uint8_t *out, const char *in_str, int out_size)
Decode a base64-encoded string (a short usage sketch follows at the end of this list).
int extradata_size
Size of the extradata content in bytes.
static int sdp_parse_fmtp_config_h264(AVFormatContext *s, AVStream *stream, PayloadContext *h264_data, const char *attr, const char *value)
int ff_h264_handle_frag_packet(AVPacket *pkt, const uint8_t *buf, int len, int start_bit, const uint8_t *nal_header, int nal_header_len)
void avpriv_report_missing_feature(void *avc, const char *msg, ...)
Log a generic warning message about a missing feature.
int ff_h264_handle_aggregated_packet(AVFormatContext *ctx, PayloadContext *data, AVPacket *pkt, const uint8_t *buf, int len, int skip_between, int *nal_counters, int nal_mask)
int av_strstart(const char *str, const char *pfx, const char **ptr)
Return non-zero if pfx is a prefix of str.
static const uint8_t start_sequence[]
#define AV_INPUT_BUFFER_PADDING_SIZE
int index
stream index in AVFormatContext
static void h264_close_context(PayloadContext *data)
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index, int flush)
Parse a packet, add all split parts to parse_queue.
static int h264_handle_packet_fu_a(AVFormatContext *ctx, PayloadContext *data, AVPacket *pkt, const uint8_t *buf, int len, int *nal_counters, int nal_mask)
AVPacket
This structure stores compressed data.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
PayloadContext
RTP/JPEG specific private data.
static int parse_h264_sdp_line(AVFormatContext *s, int st_index, PayloadContext *h264_data, const char *line)
#define AV_RB16(x)
Read a 16-bit big-endian value (libavutil/intreadwrite.h); used above to read the NAL unit size prefixes.
void * av_realloc(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory.
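
As a closing illustration of the av_base64_decode() entry above, a minimal usage sketch (the helper name is hypothetical and not part of this file):

    #include "libavutil/base64.h"

    /* hypothetical helper: decode one base64 chunk of sprop-parameter-sets */
    static int decode_parameter_set(const char *chunk, uint8_t *out, int out_size)
    {
        /* returns the number of decoded bytes, or a negative error code
         * if chunk is not valid base64 */
        return av_base64_decode(out, chunk, out_size);
    }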