#include <libxml/parser.h>
#define INITIAL_BUFFER_SIZE 32768
return ((val + 0x3F) >> 6) << 6;
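This is the body of the aligned() helper declared further down in the cross references; it rounds its argument up to the next multiple of 64. A minimal sketch of the same idea, with example values in the comments (shown only for illustration):

static int aligned(int val)
{
    /* Add 63, then clear the low 6 bits: rounds val up to a multiple of 64. */
    return ((val + 0x3F) >> 6) << 6;
}
/* aligned(1) == 64, aligned(64) == 64, aligned(65) == 128 */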
ret = sscanf(datetime, "%d-%d-%dT%d:%d:%fZ", &year, &month, &day, &hour, &minute, &second);
timeinfo.tm_year = year - 1900;
timeinfo.tm_mon  = month - 1;
timeinfo.tm_mday = day;
timeinfo.tm_hour = hour;
timeinfo.tm_min  = minute;
timeinfo.tm_sec  = (int)second;
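The filled-in struct tm is then converted to seconds since the Unix epoch with av_timegm() (listed below in the cross references). A minimal sketch of that step, assuming the fields parsed above; the helper name here is invented:

#include <stdint.h>
#include <time.h>
#include "libavutil/parseutils.h"   /* av_timegm() */

/* Hypothetical helper: convert decomposed UTC fields to epoch seconds. */
static uint64_t utc_fields_to_sec(int year, int month, int day,
                                  int hour, int minute, int second)
{
    struct tm timeinfo = { 0 };
    timeinfo.tm_year = year - 1900;
    timeinfo.tm_mon  = month - 1;
    timeinfo.tm_mday = day;
    timeinfo.tm_hour = hour;
    timeinfo.tm_min  = minute;
    timeinfo.tm_sec  = second;
    return (uint64_t)av_timegm(&timeinfo);   /* interprets the time as UTC, per the "Z" suffix */
}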
if (*ptr == 'P' || *ptr == 'T') {
days = (uint32_t)value;
hours = (uint32_t)value;
mins = (uint32_t)value;
secs = (uint32_t)value;
return ((days * 24 + hours) * 60 + mins) * 60 + secs;
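As a worked example of the return expression above: an MPD duration such as "PT1H30M" parses into days = 0, hours = 1, mins = 30, secs = 0, so the function returns ((0 * 24 + 1) * 60 + 30) * 60 + 0 = 5400 seconds.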
if (num == cur_seq_no)
if (num == cur_seq_no)
for (i = 0; i < c->n_videos; i++) {
for (i = 0; i < c->n_audios; i++) {
for (i = 0; i < c->n_subtitles; i++) {
const char *proto_name = NULL;
if (url[6] == '+' || url[6] == ':')
if (strcmp(c->allowed_extensions, "ALL") && !av_match_ext(url, c->allowed_extensions)) {
    "Filename extension of \'%s\' is not a common multimedia extension, blocked for security reasons.\n"
    "If you wish to override this adjust allowed_extensions, you can set it to \'ALL\' to allow all\n",
if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':')
else if (av_strstart(url, "crypto", NULL) && !strncmp(proto_name, url + 7, strlen(proto_name)) && url[7 + strlen(proto_name)] == ':')
else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
char *new_cookies = NULL;
char *rep_bandwidth_val,
if (!tmp_str || !tmp_str_2) {
for (i = 0; i < n_baseurl_nodes; ++i) {
    if (baseurl_nodes[i] &&
        baseurl_nodes[i]->children &&
        baseurl_nodes[i]->children->type == XML_TEXT_NODE) {
        text = xmlNodeGetContent(baseurl_nodes[i]->children);
memset(tmp_str, 0, max_url_size);
memset(tmp_str_2, 0, max_url_size);
url = av_strireplace(tmp_str, "$RepresentationID$", (const char*)rep_id_val);
if (rep_bandwidth_val && tmp_str[0] != '\0') {
url = av_strireplace(tmp_str, "$Bandwidth$", (const char*)rep_bandwidth_val);
for (i = 0; i < n_nodes; ++i) {
    val = xmlGetProp(nodes[i], attrname);
xmlNodePtr node = rootnode;
node = xmlFirstElementChild(node);
node = xmlNextElementSibling(node);
for (i = 0; i < 2; i++) {
    attr = i ? "mimeType" : "contentType";
    val = xmlGetProp(node, attr);
char *str_end_offset;
char *str_offset = av_strtok(range, "-", &str_end_offset);
xmlNodePtr fragmenturl_node,
xmlNodePtr *baseurl_nodes,
char *rep_bandwidth_val)
char *initialization_val = NULL;
char *media_val = NULL;
char *range_val = NULL;
if (!av_strcasecmp(fragmenturl_node->name, (const char *)"Initialization")) {
    initialization_val = xmlGetProp(fragmenturl_node, "sourceURL");
    range_val = xmlGetProp(fragmenturl_node, "range");
    if (initialization_val || range_val) {
    xmlFree(initialization_val);
    xmlFree(initialization_val);
    xmlFree(initialization_val);
} else if (!av_strcasecmp(fragmenturl_node->name, (const char *)"SegmentURL")) {
    media_val = xmlGetProp(fragmenturl_node, "media");
    range_val = xmlGetProp(fragmenturl_node, "mediaRange");
    if (media_val || range_val) {
xmlNodePtr fragment_timeline_node)
xmlAttrPtr attr = NULL;
if (!av_strcasecmp(fragment_timeline_node->name, (const char *)"S")) {
    attr = fragment_timeline_node->properties;
    val = xmlGetProp(fragment_timeline_node, attr->name);
    av_log(s, AV_LOG_WARNING, "parse_manifest_segmenttimeline attr->name = %s val is NULL\n", attr->name);
char *tmp_str = NULL;
char *mpdName = NULL;
xmlNodePtr node = NULL;
char *baseurl = NULL;
char *root_url = NULL;
int tmp_max_url_size = strlen(url);
for (i = n_baseurl_nodes - 1; i >= 0; i--) {
    text = xmlNodeGetContent(baseurl_nodes[i]);
    tmp_max_url_size += strlen(text);
tmp_max_url_size = aligned(tmp_max_url_size);
size = strlen(mpdName);
if (!tmp_str || !path) {
for (rootId = n_baseurl_nodes - 1; rootId > 0; rootId--) {
    if (!(node = baseurl_nodes[rootId])) {
    text = xmlNodeGetContent(node);
node = baseurl_nodes[rootId];
baseurl = xmlNodeGetContent(node);
xmlNodeSetContent(node, root_url);
size = strlen(root_url);
isRootHttp = ishttp(root_url);
if (root_url[size - 1] != token) {
for (i = 0; i < n_baseurl_nodes; ++i) {
    text = xmlNodeGetContent(baseurl_nodes[i]);
    memset(tmp_str, 0, strlen(tmp_str));
    if (!ishttp(text) && isRootHttp) {
    start = (text[0] == token);
    xmlNodeSetContent(baseurl_nodes[i], tmp_str);
if (tmp_max_url_size > *max_url_size) {
    *max_url_size = tmp_max_url_size;
xmlNodePtr adaptionset_node,
xmlNodePtr mpd_baseurl_node,
xmlNodePtr period_baseurl_node,
xmlNodePtr period_segmenttemplate_node,
xmlNodePtr period_segmentlist_node,
xmlNodePtr fragment_template_node,
xmlNodePtr content_component_node,
xmlNodePtr adaptionset_baseurl_node,
xmlNodePtr adaptionset_segmentlist_node,
xmlNodePtr adaptionset_supplementalproperty_node)
xmlNodePtr representation_segmenttemplate_node = NULL;
xmlNodePtr representation_baseurl_node = NULL;
xmlNodePtr representation_segmentlist_node = NULL;
xmlNodePtr segmentlists_tab[3];
xmlNodePtr fragment_timeline_node = NULL;
xmlNodePtr fragment_templates_tab[5];
char *duration_val = NULL;
char *presentation_timeoffset_val = NULL;
char *startnumber_val = NULL;
char *timescale_val = NULL;
char *initialization_val = NULL;
char *media_val = NULL;
xmlNodePtr baseurl_nodes[4];
xmlNodePtr representation_node = node;
char *rep_id_val = xmlGetProp(representation_node, "id");
char *rep_bandwidth_val = xmlGetProp(representation_node, "bandwidth");
char *rep_framerate_val = xmlGetProp(representation_node, "frameRate");
baseurl_nodes[0] = mpd_baseurl_node;
baseurl_nodes[1] = period_baseurl_node;
baseurl_nodes[2] = adaptionset_baseurl_node;
baseurl_nodes[3] = representation_baseurl_node;
c->max_url_size = aligned(c->max_url_size
                          + (rep_id_val ? strlen(rep_id_val) : 0)
                          + (rep_bandwidth_val ? strlen(rep_bandwidth_val) : 0));
if (representation_segmenttemplate_node || fragment_template_node || period_segmenttemplate_node) {
    fragment_timeline_node = NULL;
    fragment_templates_tab[0] = representation_segmenttemplate_node;
    fragment_templates_tab[1] = adaptionset_segmentlist_node;
    fragment_templates_tab[2] = fragment_template_node;
    fragment_templates_tab[3] = period_segmenttemplate_node;
    fragment_templates_tab[4] = period_segmentlist_node;
    presentation_timeoffset_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "presentationTimeOffset");
    if (initialization_val) {
        c->max_url_size = aligned(c->max_url_size + strlen(initialization_val));
        xmlFree(initialization_val);
    c->max_url_size = aligned(c->max_url_size + strlen(media_val));
    if (presentation_timeoffset_val) {
        xmlFree(presentation_timeoffset_val);
    xmlFree(duration_val);
    xmlFree(timescale_val);
    if (startnumber_val) {
        xmlFree(startnumber_val);
    if (adaptionset_supplementalproperty_node) {
        if (!av_strcasecmp(xmlGetProp(adaptionset_supplementalproperty_node, "schemeIdUri"), "http://dashif.org/guidelines/last-segment-number")) {
            val = xmlGetProp(adaptionset_supplementalproperty_node, "value");
            av_log(s, AV_LOG_ERROR, "Missing value attribute in adaptionset_supplementalproperty_node\n");
    if (!fragment_timeline_node)
    if (!fragment_timeline_node)
    if (!fragment_timeline_node)
    if (fragment_timeline_node) {
        fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
        while (fragment_timeline_node) {
            fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
} else if (representation_baseurl_node && !representation_segmentlist_node) {
} else if (representation_segmentlist_node) {
    xmlNodePtr fragmenturl_node = NULL;
    segmentlists_tab[0] = representation_segmentlist_node;
    segmentlists_tab[1] = adaptionset_segmentlist_node;
    segmentlists_tab[2] = period_segmentlist_node;
    xmlFree(duration_val);
    if (timescale_val) {
        xmlFree(timescale_val);
    fragmenturl_node = xmlFirstElementChild(representation_segmentlist_node);
    while (fragmenturl_node) {
        fragmenturl_node = xmlNextElementSibling(fragmenturl_node);
    if (!fragment_timeline_node)
    if (!fragment_timeline_node)
    if (!fragment_timeline_node)
    if (fragment_timeline_node) {
        fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
        while (fragment_timeline_node) {
            fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
av_log(s, AV_LOG_ERROR, "Unknown format of Representation node id[%s] \n", (const char *)rep_id_val);
rep->bandwidth = rep_bandwidth_val ? atoi(rep_bandwidth_val) : 0;
strncpy(rep->id, rep_id_val ? rep_id_val : "", sizeof(rep->id));
rep->rep_idx = subtitle_rep_idx;
xmlFree(rep_id_val);
if (rep_bandwidth_val)
    xmlFree(rep_bandwidth_val);
if (rep_framerate_val)
    xmlFree(rep_framerate_val);
xmlNodePtr adaptionset_node,
xmlNodePtr mpd_baseurl_node,
xmlNodePtr period_baseurl_node,
xmlNodePtr period_segmenttemplate_node,
xmlNodePtr period_segmentlist_node)
xmlNodePtr fragment_template_node = NULL;
xmlNodePtr content_component_node = NULL;
xmlNodePtr adaptionset_baseurl_node = NULL;
xmlNodePtr adaptionset_segmentlist_node = NULL;
xmlNodePtr adaptionset_supplementalproperty_node = NULL;
xmlNodePtr node = NULL;
node = xmlFirstElementChild(adaptionset_node);
if (!av_strcasecmp(node->name, (const char *)"SegmentTemplate")) {
    fragment_template_node = node;
} else if (!av_strcasecmp(node->name, (const char *)"ContentComponent")) {
    content_component_node = node;
} else if (!av_strcasecmp(node->name, (const char *)"BaseURL")) {
    adaptionset_baseurl_node = node;
} else if (!av_strcasecmp(node->name, (const char *)"SegmentList")) {
    adaptionset_segmentlist_node = node;
} else if (!av_strcasecmp(node->name, (const char *)"SupplementalProperty")) {
    adaptionset_supplementalproperty_node = node;
} else if (!av_strcasecmp(node->name, (const char *)"Representation")) {
        period_baseurl_node,
        period_segmenttemplate_node,
        period_segmentlist_node,
        fragment_template_node,
        content_component_node,
        adaptionset_baseurl_node,
        adaptionset_segmentlist_node,
        adaptionset_supplementalproperty_node);
node = xmlNextElementSibling(node);
node = xmlFirstElementChild(node);
val = xmlNodeGetContent(node);
val = xmlNodeGetContent(node);
val = xmlNodeGetContent(node);
node = xmlNextElementSibling(node);
int64_t filesize = 0;
xmlNodePtr root_element = NULL;
xmlNodePtr node = NULL;
xmlNodePtr period_node = NULL;
xmlNodePtr tmp_node = NULL;
xmlNodePtr mpd_baseurl_node = NULL;
xmlNodePtr period_baseurl_node = NULL;
xmlNodePtr period_segmenttemplate_node = NULL;
xmlNodePtr period_segmentlist_node = NULL;
xmlNodePtr adaptionset_node = NULL;
xmlAttrPtr attr = NULL;
uint32_t period_duration_sec = 0;
uint32_t period_start_sec = 0;
if (filesize <= 0) {
    filesize = 8 * 1024;
if (filesize <= 0) {
doc = xmlReadMemory(buffer, filesize, c->base_url, NULL, 0);
root_element = xmlDocGetRootElement(doc);
node = root_element;
if (node->type != XML_ELEMENT_NODE ||
av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - wrong root node name[%s] type[%d]\n", url, node->name, (int)node->type);
val = xmlGetProp(node, "type");
attr = node->properties;
val = xmlGetProp(node, attr->name);
if (!av_strcasecmp(attr->name, (const char *)"availabilityStartTime")) {
    av_log(s, AV_LOG_TRACE, "c->availability_start_time = [%"PRId64"]\n", c->availability_start_time);
} else if (!av_strcasecmp(attr->name, (const char *)"availabilityEndTime")) {
} else if (!av_strcasecmp(attr->name, (const char *)"publishTime")) {
} else if (!av_strcasecmp(attr->name, (const char *)"minimumUpdatePeriod")) {
} else if (!av_strcasecmp(attr->name, (const char *)"timeShiftBufferDepth")) {
    av_log(s, AV_LOG_TRACE, "c->time_shift_buffer_depth = [%"PRId64"]\n", c->time_shift_buffer_depth);
} else if (!av_strcasecmp(attr->name, (const char *)"minBufferTime")) {
} else if (!av_strcasecmp(attr->name, (const char *)"suggestedPresentationDelay")) {
    av_log(s, AV_LOG_TRACE, "c->suggested_presentation_delay = [%"PRId64"]\n", c->suggested_presentation_delay);
} else if (!av_strcasecmp(attr->name, (const char *)"mediaPresentationDuration")) {
    av_log(s, AV_LOG_TRACE, "c->media_presentation_duration = [%"PRId64"]\n", c->media_presentation_duration);
mpd_baseurl_node = xmlCopyNode(tmp_node, 1);
mpd_baseurl_node = xmlNewNode(NULL, "BaseURL");
node = xmlFirstElementChild(node);
period_duration_sec = 0;
period_start_sec = 0;
attr = node->properties;
val = xmlGetProp(node, attr->name);
} else if (!av_strcasecmp(attr->name, (const char *)"start")) {
if ((period_duration_sec) >= (c->period_duration)) {
    c->period_duration = period_duration_sec;
    c->period_start = period_start_sec;
    if (c->period_start > 0)
        c->media_presentation_duration = c->period_duration;
} else if (!av_strcasecmp(node->name, "ProgramInformation")) {
node = xmlNextElementSibling(node);
adaptionset_node = xmlFirstElementChild(period_node);
while (adaptionset_node) {
    if (!av_strcasecmp(adaptionset_node->name, (const char *)"BaseURL")) {
        period_baseurl_node = adaptionset_node;
    } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"SegmentTemplate")) {
        period_segmenttemplate_node = adaptionset_node;
    } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"SegmentList")) {
        period_segmentlist_node = adaptionset_node;
    } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"AdaptationSet")) {
    adaptionset_node = xmlNextElementSibling(adaptionset_node);
xmlFreeNode(mpd_baseurl_node);
int64_t start_time_offset = 0;
} else if (c->publish_time > 0 && !c->availability_start_time) {
    if (c->min_buffer_time) {
num = c->period_duration / length_of_each_segment;
if (rep_dest && rep_src) {
if (rep_dest && rep_src) {
int n_videos = c->n_videos;
int n_audios = c->n_audios;
int n_subtitles = c->n_subtitles;
char *base_url = c->base_url;
c->subtitles = NULL;
if (c->n_videos != n_videos) {
       "new manifest has mismatched no. of video representations, %d -> %d\n",
       n_videos, c->n_videos);
if (c->n_audios != n_audios) {
       "new manifest has mismatched no. of audio representations, %d -> %d\n",
       n_audios, c->n_audios);
if (c->n_subtitles != n_subtitles) {
       "new manifest has mismatched no. of subtitles representations, %d -> %d\n",
       n_subtitles, c->n_subtitles);
for (i = 0; i < n_videos; i++) {
for (i = 0; i < n_audios; i++) {
c->base_url = base_url;
c->n_subtitles = n_subtitles;
c->subtitles = subtitles;
c->n_audios = n_audios;
c->n_videos = n_videos;
int64_t min_seq_no = 0;
int64_t max_seq_no = 0;
} else if (c->is_live) {
if (seg->size >= 0) {
static const int max_init_section_size = 1024 * 1024;
    "Failed to open an initialization section in playlist %d\n",
sec_size = max_init_section_size;
    "Downloading an initialization section of size %"PRId64"\n",
sec_size = FFMIN(sec_size, max_init_section_size);
const char *opts[] = {
    "headers", "user_agent", "cookies", "http_proxy", "referer", "rw_timeout", NULL };
const char **opt = opts;
if (buf[0] != '\0') {
    "A DASH playlist item '%s' referred to an external file '%s'. "
    "Opening this file was forbidden for security reasons\n",
if (!avio_ctx_buffer) {
#if FF_API_R_FRAME_RATE
if (first_init_section == NULL || n_pls == 0)
url = first_init_section->url;
for (i = 0; i < n_pls; i++) {
c->interrupt_callback = &s->interrupt_callback;
s->duration = (int64_t)c->media_presentation_duration * AV_TIME_BASE;
for (i = 0; i < c->n_videos; i++) {
    if (i > 0 && c->is_init_section_common_video) {
for (i = 0; i < c->n_audios; i++) {
    if (i > 0 && c->is_init_section_common_audio) {
for (i = 0; i < c->n_subtitles; i++) {
    rep = c->subtitles[i];
    if (i > 0 && c->is_init_section_common_audio) {
for (i = 0; i < c->n_videos; i++) {
for (i = 0; i < c->n_audios; i++) {
for (i = 0; i < c->n_subtitles; i++) {
    rep = c->subtitles[i];
for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
for (i = 0; i < c->n_videos; i++) {
for (i = 0; i < c->n_audios; i++) {
for (i = 0; i < c->n_subtitles; i++) {
    rep = c->subtitles[i];
seek_pos_msec, pls->rep_idx, dry_run ? " (dry)" : "");
"last_seq_no[%"PRId64"], playlist %d.\n",
for (i = 0; i < c->n_videos; i++) {
for (i = 0; i < c->n_audios; i++) {
for (i = 0; i < c->n_subtitles; i++) {
if (av_stristr(p->buf, "dash:profile:isoff-on-demand:2011") ||
#define OFFSET(x) offsetof(DASHContext, x)
#define FLAGS AV_OPT_FLAG_DECODING_PARAM
{"allowed_extensions", "List of file extensions that dash is allowed to access",
    {.str = "aac,m4a,m4s,m4v,mov,mp4,webm"},
    INT_MIN, INT_MAX, FLAGS},
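As a usage note (not part of this file): a caller could override the allowed_extensions option declared above when opening a manifest. A sketch under that assumption; the URL is a placeholder:

#include "libavformat/avformat.h"
#include "libavutil/dict.h"

static int open_dash_example(AVFormatContext **fmt)   /* illustration only */
{
    AVDictionary *opts = NULL;
    int ret;

    /* *fmt must be NULL or allocated with avformat_alloc_context(). */
    av_dict_set(&opts, "allowed_extensions", "ALL", 0);
    ret = avformat_open_input(fmt, "https://example.com/stream.mpd", NULL, &opts);
    av_dict_free(&opts);
    return ret;
}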
static int reopen_demux_for_component(AVFormatContext *s, struct representation *pls)
static void close_demux_for_component(struct representation *pls)
#define AV_LOG_WARNING
Something somehow does not look correct.
static int64_t calc_next_seg_no_from_timelines(struct representation *pls, int64_t cur_time)
static int open_demux_for_component(AVFormatContext *s, struct representation *pls)
static int read_data(void *opaque, uint8_t *buf, int buf_size)
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
static uint64_t get_current_time_in_sec(void)
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
static int ishttp(char *url)
static int64_t calc_min_seg_no(AVFormatContext *s, struct representation *pls)
char * av_stristr(const char *s1, const char *s2)
Locate the first case-independent occurrence in the string haystack of the string needle.
static void free_video_list(DASHContext *c)
#define AVERROR_EOF
End of file.
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
uint32_t init_sec_buf_read_offset
static struct fragment * get_current_fragment(struct representation *pls)
int av_strcasecmp(const char *a, const char *b)
Locale-independent case-insensitive compare.
static av_cold int end(AVCodecContext *avctx)
static int dash_close(AVFormatContext *s)
static av_cold void cleanup(FlashSV2Context *s)
AVStream ** streams
A list of all streams in the file.
AVIOInterruptCB * interrupt_callback
static int parse_manifest_segmenturlnode(AVFormatContext *s, struct representation *rep, xmlNodePtr fragmenturl_node, xmlNodePtr *baseurl_nodes, char *rep_id_val, char *rep_bandwidth_val)
#define AV_LOG_VERBOSE
Detailed information.
int64_t probesize
Maximum size of the data read from input for determining the input container format.
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
static int read_from_url(struct representation *pls, struct fragment *seg, uint8_t *buf, int buf_size)
int64_t avio_size(AVIOContext *s)
Get the filesize.
uint64_t availability_end_time
static xmlNodePtr find_child_node_by_name(xmlNodePtr rootnode, const char *nodename)
Callback for checking whether to abort blocking functions.
static int64_t get_segment_start_time_based_on_timeline(struct representation *pls, int64_t cur_seq_no)
struct representation ** subtitles
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
struct fragment * init_section
static void free_timelines_list(struct representation *pls)
static int64_t calc_max_seg_no(struct representation *pls, DASHContext *c)
static void free_fragment(struct fragment **seg)
static int64_t calc_cur_seg_no(AVFormatContext *s, struct representation *pls)
static int read_seek(AVFormatContext *ctx, int stream_index, int64_t timestamp, int flags)
static av_cold int read_close(AVFormatContext *ctx)
static void recheck_discard_flags(AVFormatContext *s, struct representation **p, int n)
@ AV_ROUND_UP
Round toward +infinity.
time_t av_timegm(struct tm *tm)
Convert the decomposed UTC time in tm to a time_t value.
AVProgram * av_new_program(AVFormatContext *s, int id)
static uint64_t get_utc_date_time_insec(AVFormatContext *s, const char *datetime)
static enum AVMediaType get_content_type(xmlNodePtr node)
int ff_check_interrupt(AVIOInterruptCB *cb)
Check if the user has requested to interrupt a blocking function associated with cb.
static const AVOption dash_options[]
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation function.
uint64_t suggested_presentation_delay
static int64_t seek_data(void *opaque, int64_t offset, int whence)
static int aligned(int val)
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void free_representation(struct representation *pls)
char * allowed_extensions
static void move_segments(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
Seek to the keyframe at timestamp.
int flags
Flags modifying the (de)muxer behaviour.
static void free_fragment_list(struct representation *pls)
unsigned char * buf
Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero.
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
int av_match_ext(const char *filename, const char *extensions)
Return a positive value if the given filename has one of the given extensions, 0 otherwise.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int parse_programinformation(AVFormatContext *s, xmlNodePtr node)
static uint32_t get_duration_insec(AVFormatContext *s, const char *duration)
struct representation ** videos
#define INITIAL_BUFFER_SIZE
uint32_t init_sec_buf_size
int64_t max_analyze_duration
Maximum duration (in AV_TIME_BASE units) of the data read from input in avformat_find_stream_info().
@ AVDISCARD_ALL
discard all
AVCodecParameters * codecpar
Codec parameters associated with this stream.
#define LIBAVUTIL_VERSION_INT
static int read_header(FFV1Context *f)
Describe the class of an AVClass context structure.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Rational number (pair of numerator and denominator).
char * av_strireplace(const char *str, const char *from, const char *to)
Locale-independent strings replace.
static int is_common_init_section_exist(struct representation **pls, int n_pls)
const char * av_default_item_name(void *ptr)
Return the context name.
static int dash_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
AVIOContext * pb
I/O context.
This structure contains the data a format has to probe a file.
static void move_timelines(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
struct timeline ** timelines
uint64_t minimum_update_period
static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url, AVDictionary *opts, AVDictionary *opts2, int *is_http)
static int parse_manifest(AVFormatContext *s, const char *url, AVIOContext *in)
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
static char * get_val_from_nodes_tab(xmlNodePtr *nodes, const int n_nodes, const char *attrname)
int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd)
Rescale a 64-bit integer with specified rounding.
uint64_t time_shift_buffer_depth
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
static int resolve_content_path(AVFormatContext *s, const char *url, int *max_url_size, xmlNodePtr *baseurl_nodes, int n_baseurl_nodes)
int ffio_init_context(AVIOContext *s, unsigned char *buffer, int buffer_size, int write_flag, void *opaque, int(*read_packet)(void *opaque, uint8_t *buf, int buf_size), int(*write_packet)(void *opaque, uint8_t *buf, int buf_size), int64_t(*seek)(void *opaque, int64_t offset, int whence))
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
uint64_t media_presentation_duration
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
static int64_t start_time
static AVRational av_make_q(int num, int den)
Create an AVRational.
@ AVMEDIA_TYPE_UNKNOWN
Usually treated as AVMEDIA_TYPE_DATA.
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
static int copy_init_section(struct representation *rep_dest, struct representation *rep_src)
uint64_t availability_start_time
AVInputFormat ff_dash_demuxer
uint32_t init_sec_data_len
static int dash_read_header(AVFormatContext *s)
static void free_audio_list(DASHContext *c)
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
int av_strstart(const char *str, const char *pfx, const char **ptr)
Return non-zero if pfx is a prefix of str.
#define AV_LOG_INFO
Standard information.
int av_parse_video_rate(AVRational *rate, const char *arg)
Parse str and store the detected values in *rate.
static void free_subtitle_list(DASHContext *c)
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
static int dash_probe(const AVProbeData *p)
#define AV_TIME_BASE
Internal time base represented as integer.
struct representation ** audios
int64_t fragment_timescale
@ AV_ROUND_DOWN
Round toward -infinity.
int is_init_section_common_audio
static int parse_manifest_adaptationset(AVFormatContext *s, const char *url, xmlNodePtr adaptionset_node, xmlNodePtr mpd_baseurl_node, xmlNodePtr period_baseurl_node, xmlNodePtr period_segmenttemplate_node, xmlNodePtr period_segmentlist_node)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
New fields can be added to the end with minor version bumps.
static int save_avio_options(AVFormatContext *s)
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
static int nested_io_open(AVFormatContext *s, AVIOContext **pb, const char *url, int flags, AVDictionary **opts)
int id
Format-specific stream ID.
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
int64_t avio_seek(AVIOContext *s, int64_t offset, int whence)
fseek() equivalent for AVIOContext.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes, and null-terminate dst.
static struct fragment * get_Fragment(char *range)
static int parse_manifest_segmenttimeline(AVFormatContext *s, struct representation *rep, xmlNodePtr fragment_timeline_node)
void ff_make_absolute_url(char *buf, int size, const char *base, const char *rel)
Convert a relative url into an absolute url, given a base url.
struct fragment * cur_seg
static char * get_content_url(xmlNodePtr *baseurl_nodes, int n_baseurl_nodes, int max_url_size, char *rep_id_val, char *rep_bandwidth_val, char *val)
int is_init_section_common_video
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
AVRational r_frame_rate
Real base framerate of the stream.
static int refresh_manifest(AVFormatContext *s)
int(* io_open)(struct AVFormatContext *s, AVIOContext **pb, const char *url, int flags, AVDictionary **options)
A callback for opening new IO streams.
static int update_init_section(struct representation *pls)
static int parse_manifest_representation(AVFormatContext *s, const char *url, xmlNodePtr node, xmlNodePtr adaptionset_node, xmlNodePtr mpd_baseurl_node, xmlNodePtr period_baseurl_node, xmlNodePtr period_segmenttemplate_node, xmlNodePtr period_segmentlist_node, xmlNodePtr fragment_template_node, xmlNodePtr content_component_node, xmlNodePtr adaptionset_baseurl_node, xmlNodePtr adaptionset_segmentlist_node, xmlNodePtr adaptionset_supplementalproperty_node)
static int dash_read_packet(AVFormatContext *s, AVPacket *pkt)
static int open_input(DASHContext *c, struct representation *pls, struct fragment *seg)
int64_t av_gettime(void)
Get the current time in microseconds.
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it.
#define AVIO_FLAG_READ
read-only
char * av_strdup(const char *s)
Duplicate a string.
unsigned char * buffer
Start of the buffer.
int av_probe_input_buffer(AVIOContext *pb, ff_const59 AVInputFormat **fmt, const char *url, void *logctx, unsigned int offset, unsigned int max_probe_size)
Like av_probe_input_buffer2() but returns 0 on success.
This structure stores compressed data.
void ff_dash_fill_tmpl_params(char *dst, size_t buffer_size, const char *template, int rep_id, int number, int bit_rate, int64_t time)
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
int64_t fragment_duration
const char * avio_find_protocol_name(const char *url)
Return the name of the protocol that will handle the passed URL.
#define flags(name, subs,...)
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
int av_opt_get(void *obj, const char *name, int search_flags, uint8_t **out_val)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int dash_seek(AVFormatContext *s, struct representation *pls, int64_t seek_pos_msec, int flags, int dry_run)
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
int pts_wrap_bits
number of bits in pts (used for wrapping control)
struct fragment ** fragments
void * priv_data
Format private data.
static const AVClass dash_class
int64_t presentation_timeoffset
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)