#define PROMPEG_RTP_PT  0x60
#define PROMPEG_FEC_COL 0x0
#define PROMPEG_FEC_ROW 0x1

#define OFFSET(x) offsetof(PrompegContext, x)
#define E AV_OPT_FLAG_ENCODING_PARAM

    { "ttl", "Time to live (in milliseconds, multicast only)", OFFSET(ttl),
      AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, .flags = E },
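The "ttl" entry above is one row of the protocol's AVOption table (options[], listed below). A hedged sketch of how the FEC matrix dimensions used later as s->l and s->d might be declared in the same table; the descriptions, defaults and ranges for "l" and "d" are assumptions, not quoted from the source:

static const AVOption options[] = {
    /* the "ttl" entry shown above goes here */
    { "l", "FEC L (columns)", OFFSET(l), AV_OPT_TYPE_INT, { .i64 = 5 }, 4, 20, .flags = E },
    { "d", "FEC D (rows)",    OFFSET(d), AV_OPT_TYPE_INT, { .i64 = 5 }, 4, 20, .flags = E },
    { NULL }
};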
    /* xor_fast(): 64-bit word-at-a-time path */
    n = size / sizeof (uint64_t);
    s = n * sizeof (uint64_t);

    for (i = 0; i < n; i++) {

    /* 32-bit path */
    n = size / sizeof (uint32_t);
    s = n * sizeof (uint32_t);

    for (i = 0; i < n; i++) {

    /* byte-wise loop for the bytes not covered by the word-sized passes */
    for (i = 0; i < n; i++) {
        out[i] = in1[i] ^ in2[i];
    }
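These loops come from xor_fast() (declared further down in this reference): the two buffers are XORed one machine word at a time, and any leftover bytes are handled individually. A minimal self-contained sketch of the same technique, not the FFmpeg implementation itself (it uses memcpy instead of FFmpeg's aligned read/write macros):

#include <stdint.h>
#include <string.h>

/* XOR in1 and in2 into out, 8 bytes at a time, then finish byte-wise.
 * memcpy keeps the word loads/stores alignment-safe. */
static void xor_buffers(const uint8_t *in1, const uint8_t *in2,
                        uint8_t *out, int size)
{
    int i, n, s;

    n = size / (int)sizeof(uint64_t);   /* number of whole 64-bit words */
    s = n * (int)sizeof(uint64_t);      /* bytes covered by those words */

    for (i = 0; i < n; i++) {
        uint64_t a, b;
        memcpy(&a, in1 + i * 8, 8);
        memcpy(&b, in2 + i * 8, 8);
        b ^= a;
        memcpy(out + i * 8, &b, 8);
    }

    for (i = s; i < size; i++)          /* remaining 0..7 bytes */
        out[i] = in1[i] ^ in2[i];
}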
    /* prompeg_create_bitstring(): accept only RTP version-2 packets of at
     * least 12 bytes whose payload type is 33 (0x21, MPEG-2 TS) */
    if (size < 12 || (buf[0] & 0xc0) != 0x80 || (buf[1] & 0x7f) != 0x21) {

    /* fold the low six bits of the first RTP octet (P, X, CC) into the bitstring */
    b[0] = buf[0] & 0x3f;

    /* FEC packet header: set the RTP version-2 bits on top of the recovered P/X/CC */
    buf[0] = 0x80 | (b[0] & 0x3f);

    /* FEC header: E flag ORed onto the payload-type recovery field */
    buf[16] = 0x80 | b[1];
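The masks above follow the RTP and Pro-MPEG COP3 / SMPTE 2022-1 layouts: 0x80 in the first octet is the RTP version-2 pattern, 0x21 is RTP payload type 33 (MPEG-2 transport stream), and PROMPEG_RTP_PT (0x60, i.e. 96) is the dynamic payload type used for the FEC streams. A stand-alone sketch of the same validation, independent of the prompeg.c internals:

#include <stdint.h>

#define RTP_PT_MP2T 0x21  /* RTP payload type 33: MPEG-2 transport stream */

/* Return 1 if buf looks like an RTP v2 packet carrying MPEG-2 TS,
 * mirroring the check shown above (sketch, not the FFmpeg code). */
static int is_mp2t_rtp(const uint8_t *buf, int size)
{
    if (size < 12)                      /* minimum RTP header length */
        return 0;
    if ((buf[0] & 0xc0) != 0x80)        /* version field must be 2 */
        return 0;
    if ((buf[1] & 0x7f) != RTP_PT_MP2T) /* payload type, ignoring the marker bit */
        return 0;
    return 1;
}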
    /* prompeg_open(): SMPTE 2022-1 caps the FEC matrix at L x D <= 100 packets */
    if (s->l * s->d > 100) {

    /* the column and row FEC streams use rtp_port + 2 and rtp_port + 4,
     * so the base port must leave room for them */
    if (rtp_port < 1 || rtp_port > UINT16_MAX - 4) {
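A compact sketch of the same two checks, using the hypothetical names l, d and rtp_port for the already-parsed parameters:

#include <stdint.h>

/* Validate FEC parameters: the column/row product is capped at 100 by
 * SMPTE 2022-1, and the two FEC ports (rtp_port + 2, rtp_port + 4) must
 * still fit in the 16-bit port range. Sketch only. */
static int check_fec_params(int l, int d, int rtp_port)
{
    if (l * d > 100)
        return -1;
    if (rtp_port < 1 || rtp_port > UINT16_MAX - 4)
        return -1;
    return 0;
}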
    /* prompeg_write(): position of the current packet inside the FEC matrix */
    int col_idx, col_out_idx, row_idx;

    /* ff_prompeg_protocol: expose the AVClass so the options above are reachable */
    .priv_data_class = &prompeg_class,
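The .priv_data_class assignment is one field of the protocol's URLProtocol declaration (ff_prompeg_protocol, listed below). A hedged sketch of the usual registration pattern, with every field value other than .priv_data_class assumed rather than quoted from this excerpt:

const URLProtocol ff_prompeg_protocol = {
    .name            = "prompeg",
    .url_open        = prompeg_open,
    .url_write       = prompeg_write,
    .url_close       = prompeg_close,
    .priv_data_size  = sizeof(PrompegContext),
    .flags           = URL_PROTOCOL_FLAG_NETWORK,
    .priv_data_class = &prompeg_class,
};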
void av_url_split(char *proto, int proto_size, char *authorization, int authorization_size, char *hostname, int hostname_size, int *port_ptr, char *path, int path_size, const char *url)
Split a URL string into components.
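A brief usage sketch for av_url_split; the URL, buffer sizes and function name are made up for illustration:

#include <libavformat/avformat.h>

static void split_example(void)
{
    char proto[16], auth[128], host[256], path[1024];
    int port;

    av_url_split(proto, sizeof(proto), auth, sizeof(auth),
                 host, sizeof(host), &port, path, sizeof(path),
                 "prompeg://239.1.1.1:1234?ttl=8");
    /* roughly: proto = "prompeg", host = "239.1.1.1", port = 1234;
     * anything from the first '/' or '?' onwards ends up in path */
}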
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int ffurl_open_whitelist(URLContext **puc, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options, const char *whitelist, const char *blacklist, URLContext *parent)
Create a URLContext for accessing the resource indicated by url, and open it. ...
#define URL_PROTOCOL_FLAG_NETWORK
#define LIBAVUTIL_VERSION_INT
int ffurl_write(URLContext *h, const unsigned char *buf, int size)
Write size bytes from buf to the resource accessed by h.
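A minimal usage sketch for ffurl_write, assuming uc is an already-opened URLContext for one of the FEC sockets (the helper name is illustrative):

#include <stdint.h>
#include "url.h"   /* libavformat-internal header declaring ffurl_write() */

/* Send one already-built packet over an open URLContext (sketch). */
static int send_packet(URLContext *uc, const uint8_t *buf, int size)
{
    int ret = ffurl_write(uc, buf, size);
    return ret < 0 ? ret : 0;
}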
AVIOInterruptCB interrupt_callback
const char * av_default_item_name(void *ptr)
Return the context name.
static int prompeg_open(URLContext *h, const char *uri, int flags)
static const AVClass prompeg_class
Byte-order read/write helper macros from libavutil/intreadwrite.h (AV_RB16 and related).
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
static const AVOption options[]
static av_cold int end(AVCodecContext *avctx)
Byte-order read/write helper macros from libavutil/intreadwrite.h (AV_RB32 and related).
static int prompeg_init(URLContext *h, const uint8_t *buf, int size)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
const char * protocol_whitelist
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
const URLProtocol ff_prompeg_protocol
int ffurl_closep(URLContext **hh)
Close the resource accessed by the URLContext h, and free the memory used by it.
int ff_url_join(char *str, int size, const char *proto, const char *authorization, const char *hostname, int port, const char *fmt,...)
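ff_url_join is the counterpart of av_url_split; in a Pro-MPEG sender it is a natural way to derive the FEC socket addresses from the main RTP address. A hedged illustration in which the "udp" protocol string, the +2 port offset and the helper name are assumptions:

#include "url.h"   /* libavformat-internal header declaring ff_url_join() */

/* Build the address of the column-FEC socket: by Pro-MPEG convention the
 * column stream uses the RTP port + 2 (the row stream uses + 4). */
static int build_col_fec_url(char *url, int size,
                             const char *hostname, int rtp_port)
{
    return ff_url_join(url, size, "udp", NULL, hostname, rtp_port + 2, NULL);
}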
#define AV_LOG_INFO
Standard information.
const char * protocol_blacklist
PrompegFec ** fec_col_tmp
static int prompeg_write_fec(URLContext *h, PrompegFec *fec, uint8_t type)
Describe the class of an AVClass context structure.
static int prompeg_close(URLContext *h)
#define flags(name, subs,...)
static int prompeg_create_bitstring(URLContext *h, const uint8_t *buf, int size, uint8_t **bitstring)
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it...
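For example, a numeric option such as the multicast TTL can be forwarded to a nested protocol through a dictionary built with av_dict_set_int (sketch; the helper name is illustrative, and the dictionary would later be released with av_dict_free):

#include "libavutil/dict.h"

/* Put "ttl=<value>" into *opts when a TTL was requested (sketch). */
static int make_ttl_opts(AVDictionary **opts, int ttl)
{
    if (ttl >= 0)
        return av_dict_set_int(opts, "ttl", ttl, 0);
    return 0;
}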
static int prompeg_write(URLContext *h, const uint8_t *buf, int size)
int max_packet_size
if non zero, the stream is packetized with this max packet size
unbuffered private I/O API
uint32_t av_get_random_seed(void)
Get a seed to use in conjunction with random functions.
#define av_malloc_array(a, b)
static void xor_fast(const uint8_t *in1, const uint8_t *in2, uint8_t *out, int size)