1 /*
2  * Dynamic Adaptive Streaming over HTTP demux
3  * Copyright (c) 2017 samsamsam@o2.pl based on HLS demux
4  * Copyright (c) 2017 Steven Liu
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 #include <libxml/parser.h>
23 #include "libavutil/intreadwrite.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/time.h"
26 #include "libavutil/parseutils.h"
27 #include "internal.h"
28 #include "avio_internal.h"
29 #include "dash.h"
30 
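/* Initial size, in bytes, of the read buffer allocated for each representation's AVIOContext. */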
31 #define INITIAL_BUFFER_SIZE 32768
32 
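/* A single media segment: either a whole resource identified by url, or a byte range of
 * size bytes starting at url_offset within it (size == -1 means the whole resource). */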
33 struct fragment {
34  int64_t url_offset;
35  int64_t size;
36  char *url;
37 };
38 
39 /*
40  * Reference: ISO/IEC 23009-1 (DASH), 2012
41  * Section 5.3.9.6.2
42  * Table 17 — Semantics of SegmentTimeline element
43  */
44 struct timeline {
45  /* starttime: Element or Attribute Name
46  * specifies the MPD start time, in @timescale units, at which
47  * the first Segment in the series starts relative to the beginning of the Period.
48  * The value of this attribute must be equal to or greater than the sum of the previous S
49  * element's earliest presentation time and the sum of the contiguous Segment durations.
50  * If the value of the attribute is greater than what is expressed by the previous S element,
51  * it expresses discontinuities in the timeline.
52  * If not present, the value shall be assumed to be zero for the first S element
53  * and, for the subsequent S elements, the value shall be assumed to be the sum of
54  * the previous S element's earliest presentation time and contiguous duration
55  * (i.e. previous S@starttime + @duration * (@repeat + 1)).
56  */
57  int64_t starttime;
58  /* repeat: Element or Attribute Name
59  * specifies the repeat count of the number of following contiguous Segments with
60  * the same duration expressed by the value of @duration. This value is zero-based
61  * (e.g. a value of three means four Segments in the contiguous series).
62  * */
63  int64_t repeat;
64  /* duration: Element or Attribute Name
65  * specifies the Segment duration, in units of the value of the @timescale.
66  * */
67  int64_t duration;
68 };
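/* For example, a SegmentTimeline entry <S t="0" d="5000" r="2"/> with a @timescale of 1000
 * is stored as starttime = 0, duration = 5000, repeat = 2, and describes three contiguous
 * 5-second segments starting at 0 s, 5 s and 10 s. */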
69 
70 /*
71  * Each playlist has its own demuxer. If it is currently active,
72  * it has an opened AVIOContext too, and potentially an AVPacket
73  * containing the next packet from this stream.
74  */
76  char *url_template;
82  int rep_idx;
83  int rep_count;
85 
87  char id[20];
88  int bandwidth;
90  AVStream *assoc_stream; /* demuxer stream associated with this representation */
91 
93  struct fragment **fragments; /* VOD list of fragments for this representation */
94 
96  struct timeline **timelines;
97 
98  int64_t first_seq_no;
99  int64_t last_seq_no;
100  int64_t start_number; /* used with a dynamic segment list to tell which segments are new */
101 
104 
106 
107  int64_t cur_seq_no;
108  int64_t cur_seg_offset;
109  int64_t cur_seg_size;
110  struct fragment *cur_seg;
111 
112  /* Currently active Media Initialization Section */
118  int64_t cur_timestamp;
120 };
121 
122 typedef struct DASHContext {
123  const AVClass *class;
124  char *base_url;
138 
139  int n_videos;
141  int n_audios;
145 
146  /* MediaPresentationDescription Attribute */
151  uint64_t publish_time;
154  uint64_t min_buffer_time;
155 
156  /* Period Attribute */
157  uint64_t period_duration;
158  uint64_t period_start;
159 
160  int is_live;
165 
166  /* Flags for init section*/
169 
170 } DASHContext;
171 
172 static int ishttp(char *url)
173 {
174  const char *proto_name = avio_find_protocol_name(url);
175  return av_strstart(proto_name, "http", NULL);
176 }
177 
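/* Round val up to the next multiple of 64, e.g. aligned(65) == 128 and aligned(64) == 64;
 * used below to grow max_url_size in coarse steps. */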
178 static int aligned(int val)
179 {
180  return ((val + 0x3F) >> 6) << 6;
181 }
182 
183 static uint64_t get_current_time_in_sec(void)
184 {
185  return av_gettime() / 1000000;
186 }
187 
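/* Parse an ISO-8601 UTC timestamp such as "2017-10-17T16:00:00Z" into Unix time
 * (seconds since the epoch) via av_timegm(); fractional seconds are truncated. */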
188 static uint64_t get_utc_date_time_insec(AVFormatContext *s, const char *datetime)
189 {
190  struct tm timeinfo;
191  int year = 0;
192  int month = 0;
193  int day = 0;
194  int hour = 0;
195  int minute = 0;
196  int ret = 0;
197  float second = 0.0;
198 
199  /* ISO-8601 date parser */
200  if (!datetime)
201  return 0;
202 
203  ret = sscanf(datetime, "%d-%d-%dT%d:%d:%fZ", &year, &month, &day, &hour, &minute, &second);
204  /* expect six fields: year, month, day, hour, minute, second */
205  if (ret != 6) {
206  av_log(s, AV_LOG_WARNING, "get_utc_date_time_insec: invalid time format\n");
207  }
208  timeinfo.tm_year = year - 1900;
209  timeinfo.tm_mon = month - 1;
210  timeinfo.tm_mday = day;
211  timeinfo.tm_hour = hour;
212  timeinfo.tm_min = minute;
213  timeinfo.tm_sec = (int)second;
214 
215  return av_timegm(&timeinfo);
216 }
217 
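/* Parse an ISO-8601 duration into whole seconds. For example, "PT1H30M15S" yields
 * ((0 * 24 + 1) * 60 + 30) * 60 + 15 = 5415 seconds. Note that 'M' is always treated
 * as minutes here, so a month designator would be misread as minutes. */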
218 static uint32_t get_duration_insec(AVFormatContext *s, const char *duration)
219 {
220  /* ISO-8601 duration parser */
221  uint32_t days = 0;
222  uint32_t hours = 0;
223  uint32_t mins = 0;
224  uint32_t secs = 0;
225  int size = 0;
226  float value = 0;
227  char type = '\0';
228  const char *ptr = duration;
229 
230  while (*ptr) {
231  if (*ptr == 'P' || *ptr == 'T') {
232  ptr++;
233  continue;
234  }
235 
236  if (sscanf(ptr, "%f%c%n", &value, &type, &size) != 2) {
237  av_log(s, AV_LOG_WARNING, "get_duration_insec: invalid duration format\n");
238  return 0; /* parser error */
239  }
240  switch (type) {
241  case 'D':
242  days = (uint32_t)value;
243  break;
244  case 'H':
245  hours = (uint32_t)value;
246  break;
247  case 'M':
248  mins = (uint32_t)value;
249  break;
250  case 'S':
251  secs = (uint32_t)value;
252  break;
253  default:
254  // handle invalid type
255  break;
256  }
257  ptr += size;
258  }
259  return ((days * 24 + hours) * 60 + mins) * 60 + secs;
260 }
261 
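/* Walk the SegmentTimeline to find the start time, in @timescale units, of segment
 * cur_seq_no. For example, with the single entry {t = 0, d = 5000, r = 2} shown above,
 * cur_seq_no = 2 returns 10000: the third segment starts two durations after the
 * Period start. */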
262 static int64_t get_segment_start_time_based_on_timeline(struct representation *pls, int64_t cur_seq_no)
263 {
264  int64_t start_time = 0;
265  int64_t i = 0;
266  int64_t j = 0;
267  int64_t num = 0;
268 
269  if (pls->n_timelines) {
270  for (i = 0; i < pls->n_timelines; i++) {
271  if (pls->timelines[i]->starttime > 0) {
272  start_time = pls->timelines[i]->starttime;
273  }
274  if (num == cur_seq_no)
275  goto finish;
276 
277  start_time += pls->timelines[i]->duration;
278 
279  if (pls->timelines[i]->repeat == -1) {
280  start_time = pls->timelines[i]->duration * cur_seq_no;
281  goto finish;
282  }
283 
284  for (j = 0; j < pls->timelines[i]->repeat; j++) {
285  num++;
286  if (num == cur_seq_no)
287  goto finish;
288  start_time += pls->timelines[i]->duration;
289  }
290  num++;
291  }
292  }
293 finish:
294  return start_time;
295 }
296 
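/* Inverse of the lookup above: return the number of the first segment whose start time
 * exceeds cur_time, or -1 if the timeline never reaches cur_time. E.g. with
 * {t = 0, d = 5000, r = 2} and cur_time = 7000 this returns 2. */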
297 static int64_t calc_next_seg_no_from_timelines(struct representation *pls, int64_t cur_time)
298 {
299  int64_t i = 0;
300  int64_t j = 0;
301  int64_t num = 0;
302  int64_t start_time = 0;
303 
304  for (i = 0; i < pls->n_timelines; i++) {
305  if (pls->timelines[i]->starttime > 0) {
306  start_time = pls->timelines[i]->starttime;
307  }
308  if (start_time > cur_time)
309  goto finish;
310 
311  start_time += pls->timelines[i]->duration;
312  for (j = 0; j < pls->timelines[i]->repeat; j++) {
313  num++;
314  if (start_time > cur_time)
315  goto finish;
316  start_time += pls->timelines[i]->duration;
317  }
318  num++;
319  }
320 
321  return -1;
322 
323 finish:
324  return num;
325 }
326 
327 static void free_fragment(struct fragment **seg)
328 {
329  if (!(*seg)) {
330  return;
331  }
332  av_freep(&(*seg)->url);
333  av_freep(seg);
334 }
335 
336 static void free_fragment_list(struct representation *pls)
337 {
338  int i;
339 
340  for (i = 0; i < pls->n_fragments; i++) {
341  free_fragment(&pls->fragments[i]);
342  }
343  av_freep(&pls->fragments);
344  pls->n_fragments = 0;
345 }
346 
347 static void free_timelines_list(struct representation *pls)
348 {
349  int i;
350 
351  for (i = 0; i < pls->n_timelines; i++) {
352  av_freep(&pls->timelines[i]);
353  }
354  av_freep(&pls->timelines);
355  pls->n_timelines = 0;
356 }
357 
358 static void free_representation(struct representation *pls)
359 {
360  free_fragment_list(pls);
361  free_timelines_list(pls);
362  free_fragment(&pls->cur_seg);
364  av_freep(&pls->init_sec_buf);
365  av_freep(&pls->pb.buffer);
366  if (pls->input)
367  ff_format_io_close(pls->parent, &pls->input);
368  if (pls->ctx) {
369  pls->ctx->pb = NULL;
370  avformat_close_input(&pls->ctx);
371  }
372 
373  av_freep(&pls->url_template);
374  av_freep(&pls);
375 }
376 
378 {
379  int i;
380  for (i = 0; i < c->n_videos; i++) {
381  struct representation *pls = c->videos[i];
382  free_representation(pls);
383  }
384  av_freep(&c->videos);
385  c->n_videos = 0;
386 }
387 
389 {
390  int i;
391  for (i = 0; i < c->n_audios; i++) {
392  struct representation *pls = c->audios[i];
393  free_representation(pls);
394  }
395  av_freep(&c->audios);
396  c->n_audios = 0;
397 }
398 
400 {
401  int i;
402  for (i = 0; i < c->n_subtitles; i++) {
403  struct representation *pls = c->subtitles[i];
404  free_representation(pls);
405  }
406  av_freep(&c->subtitles);
407  c->n_subtitles = 0;
408 }
409 
410 static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
411  AVDictionary *opts, AVDictionary *opts2, int *is_http)
412 {
413  DASHContext *c = s->priv_data;
414  AVDictionary *tmp = NULL;
415  const char *proto_name = NULL;
416  int ret;
417 
418  av_dict_copy(&tmp, opts, 0);
419  av_dict_copy(&tmp, opts2, 0);
420 
421  if (av_strstart(url, "crypto", NULL)) {
422  if (url[6] == '+' || url[6] == ':')
423  proto_name = avio_find_protocol_name(url + 7);
424  }
425 
426  if (!proto_name)
427  proto_name = avio_find_protocol_name(url);
428 
429  if (!proto_name)
430  return AVERROR_INVALIDDATA;
431 
432  // only http(s) & file are allowed
433  if (av_strstart(proto_name, "file", NULL)) {
434  if (strcmp(c->allowed_extensions, "ALL") && !av_match_ext(url, c->allowed_extensions)) {
435  av_log(s, AV_LOG_ERROR,
436  "Filename extension of \'%s\' is not a common multimedia extension, blocked for security reasons.\n"
437  "If you wish to override this adjust allowed_extensions, you can set it to \'ALL\' to allow all\n",
438  url);
439  return AVERROR_INVALIDDATA;
440  }
441  } else if (av_strstart(proto_name, "http", NULL)) {
442  ;
443  } else
444  return AVERROR_INVALIDDATA;
445 
446  if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':')
447  ;
448  else if (av_strstart(url, "crypto", NULL) && !strncmp(proto_name, url + 7, strlen(proto_name)) && url[7 + strlen(proto_name)] == ':')
449  ;
450  else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
451  return AVERROR_INVALIDDATA;
452 
453  av_freep(pb);
454  ret = avio_open2(pb, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);
455  if (ret >= 0) {
456  // update cookies from the Set-Cookie headers of the http response
457  char *new_cookies = NULL;
458 
459  if (!(s->flags & AVFMT_FLAG_CUSTOM_IO))
460  av_opt_get(*pb, "cookies", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&new_cookies);
461 
462  if (new_cookies) {
463  av_dict_set(&opts, "cookies", new_cookies, AV_DICT_DONT_STRDUP_VAL);
464  }
465 
466  }
467 
468  av_dict_free(&tmp);
469 
470  if (is_http)
471  *is_http = av_strstart(proto_name, "http", NULL);
472 
473  return ret;
474 }
475 
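/* Resolve the BaseURL hierarchy plus the optional 'val' into one absolute URL, then
 * substitute the $RepresentationID$ and $Bandwidth$ template identifiers. For example,
 * with rep_id_val "video1" and rep_bandwidth_val "800000", a resolved template
 * "$RepresentationID$/seg_$Bandwidth$.m4s" becomes "video1/seg_800000.m4s". */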
476 static char *get_content_url(xmlNodePtr *baseurl_nodes,
477  int n_baseurl_nodes,
478  int max_url_size,
479  char *rep_id_val,
480  char *rep_bandwidth_val,
481  char *val)
482 {
483  int i;
484  char *text;
485  char *url = NULL;
486  char *tmp_str = av_mallocz(max_url_size);
487  char *tmp_str_2 = av_mallocz(max_url_size);
488 
489  if (!tmp_str || !tmp_str_2) {
490  return NULL;
491  }
492 
493  for (i = 0; i < n_baseurl_nodes; ++i) {
494  if (baseurl_nodes[i] &&
495  baseurl_nodes[i]->children &&
496  baseurl_nodes[i]->children->type == XML_TEXT_NODE) {
497  text = xmlNodeGetContent(baseurl_nodes[i]->children);
498  if (text) {
499  memset(tmp_str, 0, max_url_size);
500  memset(tmp_str_2, 0, max_url_size);
501  ff_make_absolute_url(tmp_str_2, max_url_size, tmp_str, text);
502  av_strlcpy(tmp_str, tmp_str_2, max_url_size);
503  xmlFree(text);
504  }
505  }
506  }
507 
508  if (val)
509  ff_make_absolute_url(tmp_str, max_url_size, tmp_str, val);
510 
511  if (rep_id_val) {
512  url = av_strireplace(tmp_str, "$RepresentationID$", (const char*)rep_id_val);
513  if (!url) {
514  goto end;
515  }
516  av_strlcpy(tmp_str, url, max_url_size);
517  }
518  if (rep_bandwidth_val && tmp_str[0] != '\0') {
519  // free any previously assigned url before reassigning
520  av_free(url);
521  url = av_strireplace(tmp_str, "$Bandwidth$", (const char*)rep_bandwidth_val);
522  if (!url) {
523  goto end;
524  }
525  }
526 end:
527  av_free(tmp_str);
528  av_free(tmp_str_2);
529  return url;
530 }
531 
532 static char *get_val_from_nodes_tab(xmlNodePtr *nodes, const int n_nodes, const char *attrname)
533 {
534  int i;
535  char *val;
536 
537  for (i = 0; i < n_nodes; ++i) {
538  if (nodes[i]) {
539  val = xmlGetProp(nodes[i], attrname);
540  if (val)
541  return val;
542  }
543  }
544 
545  return NULL;
546 }
547 
548 static xmlNodePtr find_child_node_by_name(xmlNodePtr rootnode, const char *nodename)
549 {
550  xmlNodePtr node = rootnode;
551  if (!node) {
552  return NULL;
553  }
554 
555  node = xmlFirstElementChild(node);
556  while (node) {
557  if (!av_strcasecmp(node->name, nodename)) {
558  return node;
559  }
560  node = xmlNextElementSibling(node);
561  }
562  return NULL;
563 }
564 
565 static enum AVMediaType get_content_type(xmlNodePtr node)
566 {
568  int i = 0;
569  const char *attr;
570  char *val = NULL;
571 
572  if (node) {
573  for (i = 0; i < 2; i++) {
574  attr = i ? "mimeType" : "contentType";
575  val = xmlGetProp(node, attr);
576  if (val) {
577  if (av_stristr((const char *)val, "video")) {
578  type = AVMEDIA_TYPE_VIDEO;
579  } else if (av_stristr((const char *)val, "audio")) {
580  type = AVMEDIA_TYPE_AUDIO;
581  } else if (av_stristr((const char *)val, "text")) {
582  type = AVMEDIA_TYPE_SUBTITLE;
583  }
584  xmlFree(val);
585  }
586  }
587  }
588  return type;
589 }
590 
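/* Build a fragment from a byte-range string of the form "start-end". For example,
 * "100-200" yields url_offset = 100 and size = 100 (end minus start); without a range
 * the size stays -1, i.e. the whole resource. */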
591 static struct fragment * get_Fragment(char *range)
592 {
593  struct fragment * seg = av_mallocz(sizeof(struct fragment));
594 
595  if (!seg)
596  return NULL;
597 
598  seg->size = -1;
599  if (range) {
600  char *str_end_offset;
601  char *str_offset = av_strtok(range, "-", &str_end_offset);
602  seg->url_offset = strtoll(str_offset, NULL, 10);
603  seg->size = strtoll(str_end_offset, NULL, 10) - seg->url_offset;
604  }
605 
606  return seg;
607 }
608 
610  xmlNodePtr fragmenturl_node,
611  xmlNodePtr *baseurl_nodes,
612  char *rep_id_val,
613  char *rep_bandwidth_val)
614 {
615  DASHContext *c = s->priv_data;
616  char *initialization_val = NULL;
617  char *media_val = NULL;
618  char *range_val = NULL;
619  int max_url_size = c ? c->max_url_size: MAX_URL_SIZE;
620 
621  if (!av_strcasecmp(fragmenturl_node->name, (const char *)"Initialization")) {
622  initialization_val = xmlGetProp(fragmenturl_node, "sourceURL");
623  range_val = xmlGetProp(fragmenturl_node, "range");
624  if (initialization_val || range_val) {
625  rep->init_section = get_Fragment(range_val);
626  if (!rep->init_section) {
627  xmlFree(initialization_val);
628  xmlFree(range_val);
629  return AVERROR(ENOMEM);
630  }
631  rep->init_section->url = get_content_url(baseurl_nodes, 4,
632  max_url_size,
633  rep_id_val,
634  rep_bandwidth_val,
635  initialization_val);
636 
637  if (!rep->init_section->url) {
638  av_free(rep->init_section);
639  xmlFree(initialization_val);
640  xmlFree(range_val);
641  return AVERROR(ENOMEM);
642  }
643  xmlFree(initialization_val);
644  xmlFree(range_val);
645  }
646  } else if (!av_strcasecmp(fragmenturl_node->name, (const char *)"SegmentURL")) {
647  media_val = xmlGetProp(fragmenturl_node, "media");
648  range_val = xmlGetProp(fragmenturl_node, "mediaRange");
649  if (media_val || range_val) {
650  struct fragment *seg = get_Fragment(range_val);
651  if (!seg) {
652  xmlFree(media_val);
653  xmlFree(range_val);
654  return AVERROR(ENOMEM);
655  }
656  seg->url = get_content_url(baseurl_nodes, 4,
657  max_url_size,
658  rep_id_val,
659  rep_bandwidth_val,
660  media_val);
661  if (!seg->url) {
662  av_free(seg);
663  xmlFree(media_val);
664  xmlFree(range_val);
665  return AVERROR(ENOMEM);
666  }
667  dynarray_add(&rep->fragments, &rep->n_fragments, seg);
668  xmlFree(media_val);
669  xmlFree(range_val);
670  }
671  }
672 
673  return 0;
674 }
675 
677  xmlNodePtr fragment_timeline_node)
678 {
679  xmlAttrPtr attr = NULL;
680  char *val = NULL;
681 
682  if (!av_strcasecmp(fragment_timeline_node->name, (const char *)"S")) {
683  struct timeline *tml = av_mallocz(sizeof(struct timeline));
684  if (!tml) {
685  return AVERROR(ENOMEM);
686  }
687  attr = fragment_timeline_node->properties;
688  while (attr) {
689  val = xmlGetProp(fragment_timeline_node, attr->name);
690 
691  if (!val) {
692  av_log(s, AV_LOG_WARNING, "parse_manifest_segmenttimeline attr->name = %s val is NULL\n", attr->name);
693  continue;
694  }
695 
696  if (!av_strcasecmp(attr->name, (const char *)"t")) {
697  tml->starttime = (int64_t)strtoll(val, NULL, 10);
698  } else if (!av_strcasecmp(attr->name, (const char *)"r")) {
699  tml->repeat =(int64_t) strtoll(val, NULL, 10);
700  } else if (!av_strcasecmp(attr->name, (const char *)"d")) {
701  tml->duration = (int64_t)strtoll(val, NULL, 10);
702  }
703  attr = attr->next;
704  xmlFree(val);
705  }
706  dynarray_add(&rep->timelines, &rep->n_timelines, tml);
707  }
708 
709  return 0;
710 }
711 
712 static int resolve_content_path(AVFormatContext *s, const char *url, int *max_url_size, xmlNodePtr *baseurl_nodes, int n_baseurl_nodes)
713 {
714  char *tmp_str = NULL;
715  char *path = NULL;
716  char *mpdName = NULL;
717  xmlNodePtr node = NULL;
718  char *baseurl = NULL;
719  char *root_url = NULL;
720  char *text = NULL;
721  char *tmp = NULL;
722  int isRootHttp = 0;
723  char token ='/';
724  int start = 0;
725  int rootId = 0;
726  int updated = 0;
727  int size = 0;
728  int i;
729  int tmp_max_url_size = strlen(url);
730 
731  for (i = n_baseurl_nodes-1; i >= 0 ; i--) {
732  text = xmlNodeGetContent(baseurl_nodes[i]);
733  if (!text)
734  continue;
735  tmp_max_url_size += strlen(text);
736  if (ishttp(text)) {
737  xmlFree(text);
738  break;
739  }
740  xmlFree(text);
741  }
742 
743  tmp_max_url_size = aligned(tmp_max_url_size);
744  text = av_mallocz(tmp_max_url_size);
745  if (!text) {
746  updated = AVERROR(ENOMEM);
747  goto end;
748  }
749  av_strlcpy(text, url, strlen(url)+1);
750  tmp = text;
751  while (mpdName = av_strtok(tmp, "/", &tmp)) {
752  size = strlen(mpdName);
753  }
754  av_free(text);
755 
756  path = av_mallocz(tmp_max_url_size);
757  tmp_str = av_mallocz(tmp_max_url_size);
758  if (!tmp_str || !path) {
759  updated = AVERROR(ENOMEM);
760  goto end;
761  }
762 
763  av_strlcpy (path, url, strlen(url) - size + 1);
764  for (rootId = n_baseurl_nodes - 1; rootId > 0; rootId --) {
765  if (!(node = baseurl_nodes[rootId])) {
766  continue;
767  }
768  text = xmlNodeGetContent(node);
769  if (ishttp(text)) {
770  xmlFree(text);
771  break;
772  }
773  xmlFree(text);
774  }
775 
776  node = baseurl_nodes[rootId];
777  baseurl = xmlNodeGetContent(node);
778  root_url = (av_strcasecmp(baseurl, "")) ? baseurl : path;
779  if (node) {
780  xmlNodeSetContent(node, root_url);
781  updated = 1;
782  }
783 
784  size = strlen(root_url);
785  isRootHttp = ishttp(root_url);
786 
787  if (root_url[size - 1] != token) {
788  av_strlcat(root_url, "/", size + 2);
789  size += 2;
790  }
791 
792  for (i = 0; i < n_baseurl_nodes; ++i) {
793  if (i == rootId) {
794  continue;
795  }
796  text = xmlNodeGetContent(baseurl_nodes[i]);
797  if (text) {
798  memset(tmp_str, 0, strlen(tmp_str));
799  if (!ishttp(text) && isRootHttp) {
800  av_strlcpy(tmp_str, root_url, size + 1);
801  }
802  start = (text[0] == token);
803  av_strlcat(tmp_str, text + start, tmp_max_url_size);
804  xmlNodeSetContent(baseurl_nodes[i], tmp_str);
805  updated = 1;
806  xmlFree(text);
807  }
808  }
809 
810 end:
811  if (tmp_max_url_size > *max_url_size) {
812  *max_url_size = tmp_max_url_size;
813  }
814  av_free(path);
815  av_free(tmp_str);
816  xmlFree(baseurl);
817  return updated;
818 
819 }
820 
822  xmlNodePtr node,
823  xmlNodePtr adaptionset_node,
824  xmlNodePtr mpd_baseurl_node,
825  xmlNodePtr period_baseurl_node,
826  xmlNodePtr period_segmenttemplate_node,
827  xmlNodePtr period_segmentlist_node,
828  xmlNodePtr fragment_template_node,
829  xmlNodePtr content_component_node,
830  xmlNodePtr adaptionset_baseurl_node,
831  xmlNodePtr adaptionset_segmentlist_node,
832  xmlNodePtr adaptionset_supplementalproperty_node)
833 {
834  int32_t ret = 0;
835  int32_t subtitle_rep_idx = 0;
836  int32_t audio_rep_idx = 0;
837  int32_t video_rep_idx = 0;
838  DASHContext *c = s->priv_data;
839  struct representation *rep = NULL;
840  struct fragment *seg = NULL;
841  xmlNodePtr representation_segmenttemplate_node = NULL;
842  xmlNodePtr representation_baseurl_node = NULL;
843  xmlNodePtr representation_segmentlist_node = NULL;
844  xmlNodePtr segmentlists_tab[3];
845  xmlNodePtr fragment_timeline_node = NULL;
846  xmlNodePtr fragment_templates_tab[5];
847  char *duration_val = NULL;
848  char *presentation_timeoffset_val = NULL;
849  char *startnumber_val = NULL;
850  char *timescale_val = NULL;
851  char *initialization_val = NULL;
852  char *media_val = NULL;
853  char *val = NULL;
854  xmlNodePtr baseurl_nodes[4];
855  xmlNodePtr representation_node = node;
856  char *rep_id_val = xmlGetProp(representation_node, "id");
857  char *rep_bandwidth_val = xmlGetProp(representation_node, "bandwidth");
858  char *rep_framerate_val = xmlGetProp(representation_node, "frameRate");
860 
861  // try to get information from the Representation
862  if (type == AVMEDIA_TYPE_UNKNOWN)
863  type = get_content_type(representation_node);
864  // try to get information from the ContentComponent
865  if (type == AVMEDIA_TYPE_UNKNOWN)
866  type = get_content_type(content_component_node);
867  // try to get information from the AdaptationSet
868  if (type == AVMEDIA_TYPE_UNKNOWN)
869  type = get_content_type(adaptionset_node);
870  if (type == AVMEDIA_TYPE_UNKNOWN) {
871  av_log(s, AV_LOG_VERBOSE, "Parsing '%s' - skipping unsupported representation type\n", url);
872  } else if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO || type == AVMEDIA_TYPE_SUBTITLE) {
873  // convert selected representation to our internal struct
874  rep = av_mallocz(sizeof(struct representation));
875  if (!rep) {
876  ret = AVERROR(ENOMEM);
877  goto end;
878  }
879  representation_segmenttemplate_node = find_child_node_by_name(representation_node, "SegmentTemplate");
880  representation_baseurl_node = find_child_node_by_name(representation_node, "BaseURL");
881  representation_segmentlist_node = find_child_node_by_name(representation_node, "SegmentList");
882 
883  baseurl_nodes[0] = mpd_baseurl_node;
884  baseurl_nodes[1] = period_baseurl_node;
885  baseurl_nodes[2] = adaptionset_baseurl_node;
886  baseurl_nodes[3] = representation_baseurl_node;
887 
888  ret = resolve_content_path(s, url, &c->max_url_size, baseurl_nodes, 4);
890  + (rep_id_val ? strlen(rep_id_val) : 0)
891  + (rep_bandwidth_val ? strlen(rep_bandwidth_val) : 0));
892  if (ret == AVERROR(ENOMEM) || ret == 0) {
893  goto end;
894  }
895  if (representation_segmenttemplate_node || fragment_template_node || period_segmenttemplate_node) {
896  fragment_timeline_node = NULL;
897  fragment_templates_tab[0] = representation_segmenttemplate_node;
898  fragment_templates_tab[1] = adaptionset_segmentlist_node;
899  fragment_templates_tab[2] = fragment_template_node;
900  fragment_templates_tab[3] = period_segmenttemplate_node;
901  fragment_templates_tab[4] = period_segmentlist_node;
902 
903  presentation_timeoffset_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "presentationTimeOffset");
904  duration_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "duration");
905  startnumber_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "startNumber");
906  timescale_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "timescale");
907  initialization_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "initialization");
908  media_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "media");
909 
910  if (initialization_val) {
911  rep->init_section = av_mallocz(sizeof(struct fragment));
912  if (!rep->init_section) {
913  av_free(rep);
914  ret = AVERROR(ENOMEM);
915  goto end;
916  }
917  c->max_url_size = aligned(c->max_url_size + strlen(initialization_val));
918  rep->init_section->url = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, initialization_val);
919  if (!rep->init_section->url) {
920  av_free(rep->init_section);
921  av_free(rep);
922  ret = AVERROR(ENOMEM);
923  goto end;
924  }
925  rep->init_section->size = -1;
926  xmlFree(initialization_val);
927  }
928 
929  if (media_val) {
930  c->max_url_size = aligned(c->max_url_size + strlen(media_val));
931  rep->url_template = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, media_val);
932  xmlFree(media_val);
933  }
934 
935  if (presentation_timeoffset_val) {
936  rep->presentation_timeoffset = (int64_t) strtoll(presentation_timeoffset_val, NULL, 10);
937  av_log(s, AV_LOG_TRACE, "rep->presentation_timeoffset = [%"PRId64"]\n", rep->presentation_timeoffset);
938  xmlFree(presentation_timeoffset_val);
939  }
940  if (duration_val) {
941  rep->fragment_duration = (int64_t) strtoll(duration_val, NULL, 10);
942  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
943  xmlFree(duration_val);
944  }
945  if (timescale_val) {
946  rep->fragment_timescale = (int64_t) strtoll(timescale_val, NULL, 10);
947  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
948  xmlFree(timescale_val);
949  }
950  if (startnumber_val) {
951  rep->first_seq_no = (int64_t) strtoll(startnumber_val, NULL, 10);
952  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
953  xmlFree(startnumber_val);
954  }
955  if (adaptionset_supplementalproperty_node) {
956  if (!av_strcasecmp(xmlGetProp(adaptionset_supplementalproperty_node,"schemeIdUri"), "http://dashif.org/guidelines/last-segment-number")) {
957  val = xmlGetProp(adaptionset_supplementalproperty_node,"value");
958  if (!val) {
959  av_log(s, AV_LOG_ERROR, "Missing value attribute in adaptionset_supplementalproperty_node\n");
960  } else {
961  rep->last_seq_no =(int64_t) strtoll(val, NULL, 10) - 1;
962  xmlFree(val);
963  }
964  }
965  }
966 
967  fragment_timeline_node = find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
968 
969  if (!fragment_timeline_node)
970  fragment_timeline_node = find_child_node_by_name(fragment_template_node, "SegmentTimeline");
971  if (!fragment_timeline_node)
972  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
973  if (!fragment_timeline_node)
974  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
975  if (fragment_timeline_node) {
976  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
977  while (fragment_timeline_node) {
978  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
979  if (ret < 0) {
980  return ret;
981  }
982  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
983  }
984  }
985  } else if (representation_baseurl_node && !representation_segmentlist_node) {
986  seg = av_mallocz(sizeof(struct fragment));
987  if (!seg) {
988  ret = AVERROR(ENOMEM);
989  goto end;
990  }
991  seg->url = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, NULL);
992  if (!seg->url) {
993  av_free(seg);
994  ret = AVERROR(ENOMEM);
995  goto end;
996  }
997  seg->size = -1;
998  dynarray_add(&rep->fragments, &rep->n_fragments, seg);
999  } else if (representation_segmentlist_node) {
1000  // TODO: https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html
1001  // http://www-itec.uni-klu.ac.at/dash/ddash/mpdGenerator.php?fragmentlength=15&type=full
1002  xmlNodePtr fragmenturl_node = NULL;
1003  segmentlists_tab[0] = representation_segmentlist_node;
1004  segmentlists_tab[1] = adaptionset_segmentlist_node;
1005  segmentlists_tab[2] = period_segmentlist_node;
1006 
1007  duration_val = get_val_from_nodes_tab(segmentlists_tab, 3, "duration");
1008  timescale_val = get_val_from_nodes_tab(segmentlists_tab, 3, "timescale");
1009  if (duration_val) {
1010  rep->fragment_duration = (int64_t) strtoll(duration_val, NULL, 10);
1011  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
1012  xmlFree(duration_val);
1013  }
1014  if (timescale_val) {
1015  rep->fragment_timescale = (int64_t) strtoll(timescale_val, NULL, 10);
1016  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
1017  xmlFree(timescale_val);
1018  }
1019  fragmenturl_node = xmlFirstElementChild(representation_segmentlist_node);
1020  while (fragmenturl_node) {
1021  ret = parse_manifest_segmenturlnode(s, rep, fragmenturl_node,
1022  baseurl_nodes,
1023  rep_id_val,
1024  rep_bandwidth_val);
1025  if (ret < 0) {
1026  return ret;
1027  }
1028  fragmenturl_node = xmlNextElementSibling(fragmenturl_node);
1029  }
1030 
1031  fragment_timeline_node = find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
1032 
1033  if (!fragment_timeline_node)
1034  fragment_timeline_node = find_child_node_by_name(fragment_template_node, "SegmentTimeline");
1035  if (!fragment_timeline_node)
1036  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
1037  if (!fragment_timeline_node)
1038  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
1039  if (fragment_timeline_node) {
1040  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
1041  while (fragment_timeline_node) {
1042  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
1043  if (ret < 0) {
1044  return ret;
1045  }
1046  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
1047  }
1048  }
1049  } else {
1050  free_representation(rep);
1051  rep = NULL;
1052  av_log(s, AV_LOG_ERROR, "Unknown format of Representation node id[%s] \n", (const char *)rep_id_val);
1053  }
1054 
1055  if (rep) {
1056  if (rep->fragment_duration > 0 && !rep->fragment_timescale)
1057  rep->fragment_timescale = 1;
1058  rep->bandwidth = rep_bandwidth_val ? atoi(rep_bandwidth_val) : 0;
1059  strncpy(rep->id, rep_id_val ? rep_id_val : "", sizeof(rep->id));
1060  rep->framerate = av_make_q(0, 0);
1061  if (type == AVMEDIA_TYPE_VIDEO && rep_framerate_val) {
1062  ret = av_parse_video_rate(&rep->framerate, rep_framerate_val);
1063  if (ret < 0)
1064  av_log(s, AV_LOG_VERBOSE, "Ignoring invalid frame rate '%s'\n", rep_framerate_val);
1065  }
1066 
1067  switch (type) {
1068  case AVMEDIA_TYPE_VIDEO:
1069  rep->rep_idx = video_rep_idx;
1070  dynarray_add(&c->videos, &c->n_videos, rep);
1071  break;
1072  case AVMEDIA_TYPE_AUDIO:
1073  rep->rep_idx = audio_rep_idx;
1074  dynarray_add(&c->audios, &c->n_audios, rep);
1075  break;
1076  case AVMEDIA_TYPE_SUBTITLE:
1077  rep->rep_idx = subtitle_rep_idx;
1078  dynarray_add(&c->subtitles, &c->n_subtitles, rep);
1079  break;
1080  default:
1081  av_log(s, AV_LOG_WARNING, "Unsupported stream type %d\n", type);
1082  break;
1083  }
1084  }
1085  }
1086 
1087  video_rep_idx += type == AVMEDIA_TYPE_VIDEO;
1088  audio_rep_idx += type == AVMEDIA_TYPE_AUDIO;
1089  subtitle_rep_idx += type == AVMEDIA_TYPE_SUBTITLE;
1090 
1091 end:
1092  if (rep_id_val)
1093  xmlFree(rep_id_val);
1094  if (rep_bandwidth_val)
1095  xmlFree(rep_bandwidth_val);
1096  if (rep_framerate_val)
1097  xmlFree(rep_framerate_val);
1098 
1099  return ret;
1100 }
1101 
1103  xmlNodePtr adaptionset_node,
1104  xmlNodePtr mpd_baseurl_node,
1105  xmlNodePtr period_baseurl_node,
1106  xmlNodePtr period_segmenttemplate_node,
1107  xmlNodePtr period_segmentlist_node)
1108 {
1109  int ret = 0;
1110  DASHContext *c = s->priv_data;
1111  xmlNodePtr fragment_template_node = NULL;
1112  xmlNodePtr content_component_node = NULL;
1113  xmlNodePtr adaptionset_baseurl_node = NULL;
1114  xmlNodePtr adaptionset_segmentlist_node = NULL;
1115  xmlNodePtr adaptionset_supplementalproperty_node = NULL;
1116  xmlNodePtr node = NULL;
1117  c->adaptionset_contenttype_val = xmlGetProp(adaptionset_node, "contentType");
1118  c->adaptionset_par_val = xmlGetProp(adaptionset_node, "par");
1119  c->adaptionset_lang_val = xmlGetProp(adaptionset_node, "lang");
1120  c->adaptionset_minbw_val = xmlGetProp(adaptionset_node, "minBandwidth");
1121  c->adaptionset_maxbw_val = xmlGetProp(adaptionset_node, "maxBandwidth");
1122  c->adaptionset_minwidth_val = xmlGetProp(adaptionset_node, "minWidth");
1123  c->adaptionset_maxwidth_val = xmlGetProp(adaptionset_node, "maxWidth");
1124  c->adaptionset_minheight_val = xmlGetProp(adaptionset_node, "minHeight");
1125  c->adaptionset_maxheight_val = xmlGetProp(adaptionset_node, "maxHeight");
1126  c->adaptionset_minframerate_val = xmlGetProp(adaptionset_node, "minFrameRate");
1127  c->adaptionset_maxframerate_val = xmlGetProp(adaptionset_node, "maxFrameRate");
1128  c->adaptionset_segmentalignment_val = xmlGetProp(adaptionset_node, "segmentAlignment");
1129  c->adaptionset_bitstreamswitching_val = xmlGetProp(adaptionset_node, "bitstreamSwitching");
1130 
1131  node = xmlFirstElementChild(adaptionset_node);
1132  while (node) {
1133  if (!av_strcasecmp(node->name, (const char *)"SegmentTemplate")) {
1134  fragment_template_node = node;
1135  } else if (!av_strcasecmp(node->name, (const char *)"ContentComponent")) {
1136  content_component_node = node;
1137  } else if (!av_strcasecmp(node->name, (const char *)"BaseURL")) {
1138  adaptionset_baseurl_node = node;
1139  } else if (!av_strcasecmp(node->name, (const char *)"SegmentList")) {
1140  adaptionset_segmentlist_node = node;
1141  } else if (!av_strcasecmp(node->name, (const char *)"SupplementalProperty")) {
1142  adaptionset_supplementalproperty_node = node;
1143  } else if (!av_strcasecmp(node->name, (const char *)"Representation")) {
1144  ret = parse_manifest_representation(s, url, node,
1145  adaptionset_node,
1146  mpd_baseurl_node,
1147  period_baseurl_node,
1148  period_segmenttemplate_node,
1149  period_segmentlist_node,
1150  fragment_template_node,
1151  content_component_node,
1152  adaptionset_baseurl_node,
1153  adaptionset_segmentlist_node,
1154  adaptionset_supplementalproperty_node);
1155  if (ret < 0) {
1156  return ret;
1157  }
1158  }
1159  node = xmlNextElementSibling(node);
1160  }
1161  return 0;
1162 }
1163 
1164 static int parse_programinformation(AVFormatContext *s, xmlNodePtr node)
1165 {
1166  xmlChar *val = NULL;
1167 
1168  node = xmlFirstElementChild(node);
1169  while (node) {
1170  if (!av_strcasecmp(node->name, "Title")) {
1171  val = xmlNodeGetContent(node);
1172  if (val) {
1173  av_dict_set(&s->metadata, "Title", val, 0);
1174  }
1175  } else if (!av_strcasecmp(node->name, "Source")) {
1176  val = xmlNodeGetContent(node);
1177  if (val) {
1178  av_dict_set(&s->metadata, "Source", val, 0);
1179  }
1180  } else if (!av_strcasecmp(node->name, "Copyright")) {
1181  val = xmlNodeGetContent(node);
1182  if (val) {
1183  av_dict_set(&s->metadata, "Copyright", val, 0);
1184  }
1185  }
1186  node = xmlNextElementSibling(node);
1187  xmlFree(val);
1188  }
1189  return 0;
1190 }
1191 
1192 static int parse_manifest(AVFormatContext *s, const char *url, AVIOContext *in)
1193 {
1194  DASHContext *c = s->priv_data;
1195  int ret = 0;
1196  int close_in = 0;
1197  uint8_t *new_url = NULL;
1198  int64_t filesize = 0;
1199  char *buffer = NULL;
1200  AVDictionary *opts = NULL;
1201  xmlDoc *doc = NULL;
1202  xmlNodePtr root_element = NULL;
1203  xmlNodePtr node = NULL;
1204  xmlNodePtr period_node = NULL;
1205  xmlNodePtr tmp_node = NULL;
1206  xmlNodePtr mpd_baseurl_node = NULL;
1207  xmlNodePtr period_baseurl_node = NULL;
1208  xmlNodePtr period_segmenttemplate_node = NULL;
1209  xmlNodePtr period_segmentlist_node = NULL;
1210  xmlNodePtr adaptionset_node = NULL;
1211  xmlAttrPtr attr = NULL;
1212  char *val = NULL;
1213  uint32_t period_duration_sec = 0;
1214  uint32_t period_start_sec = 0;
1215 
1216  if (!in) {
1217  close_in = 1;
1218 
1219  av_dict_copy(&opts, c->avio_opts, 0);
1220  ret = avio_open2(&in, url, AVIO_FLAG_READ, c->interrupt_callback, &opts);
1221  av_dict_free(&opts);
1222  if (ret < 0)
1223  return ret;
1224  }
1225 
1226  if (av_opt_get(in, "location", AV_OPT_SEARCH_CHILDREN, &new_url) >= 0) {
1227  c->base_url = av_strdup(new_url);
1228  } else {
1229  c->base_url = av_strdup(url);
1230  }
1231 
1232  filesize = avio_size(in);
1233  if (filesize <= 0) {
1234  filesize = 8 * 1024;
1235  }
1236 
1237  buffer = av_mallocz(filesize);
1238  if (!buffer) {
1239  av_free(c->base_url);
1240  return AVERROR(ENOMEM);
1241  }
1242 
1243  filesize = avio_read(in, buffer, filesize);
1244  if (filesize <= 0) {
1245  av_log(s, AV_LOG_ERROR, "Unable to read from '%s'\n", url);
1246  ret = AVERROR_INVALIDDATA;
1247  } else {
1248  LIBXML_TEST_VERSION
1249 
1250  doc = xmlReadMemory(buffer, filesize, c->base_url, NULL, 0);
1251  root_element = xmlDocGetRootElement(doc);
1252  node = root_element;
1253 
1254  if (!node) {
1255  ret = AVERROR_INVALIDDATA;
1256  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing root node\n", url);
1257  goto cleanup;
1258  }
1259 
1260  if (node->type != XML_ELEMENT_NODE ||
1261  av_strcasecmp(node->name, (const char *)"MPD")) {
1262  ret = AVERROR_INVALIDDATA;
1263  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - wrong root node name[%s] type[%d]\n", url, node->name, (int)node->type);
1264  goto cleanup;
1265  }
1266 
1267  val = xmlGetProp(node, "type");
1268  if (!val) {
1269  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing type attrib\n", url);
1270  ret = AVERROR_INVALIDDATA;
1271  goto cleanup;
1272  }
1273  if (!av_strcasecmp(val, (const char *)"dynamic"))
1274  c->is_live = 1;
1275  xmlFree(val);
1276 
1277  attr = node->properties;
1278  while (attr) {
1279  val = xmlGetProp(node, attr->name);
1280 
1281  if (!av_strcasecmp(attr->name, (const char *)"availabilityStartTime")) {
1282  c->availability_start_time = get_utc_date_time_insec(s, (const char *)val);
1283  av_log(s, AV_LOG_TRACE, "c->availability_start_time = [%"PRId64"]\n", c->availability_start_time);
1284  } else if (!av_strcasecmp(attr->name, (const char *)"availabilityEndTime")) {
1285  c->availability_end_time = get_utc_date_time_insec(s, (const char *)val);
1286  av_log(s, AV_LOG_TRACE, "c->availability_end_time = [%"PRId64"]\n", c->availability_end_time);
1287  } else if (!av_strcasecmp(attr->name, (const char *)"publishTime")) {
1288  c->publish_time = get_utc_date_time_insec(s, (const char *)val);
1289  av_log(s, AV_LOG_TRACE, "c->publish_time = [%"PRId64"]\n", c->publish_time);
1290  } else if (!av_strcasecmp(attr->name, (const char *)"minimumUpdatePeriod")) {
1291  c->minimum_update_period = get_duration_insec(s, (const char *)val);
1292  av_log(s, AV_LOG_TRACE, "c->minimum_update_period = [%"PRId64"]\n", c->minimum_update_period);
1293  } else if (!av_strcasecmp(attr->name, (const char *)"timeShiftBufferDepth")) {
1294  c->time_shift_buffer_depth = get_duration_insec(s, (const char *)val);
1295  av_log(s, AV_LOG_TRACE, "c->time_shift_buffer_depth = [%"PRId64"]\n", c->time_shift_buffer_depth);
1296  } else if (!av_strcasecmp(attr->name, (const char *)"minBufferTime")) {
1297  c->min_buffer_time = get_duration_insec(s, (const char *)val);
1298  av_log(s, AV_LOG_TRACE, "c->min_buffer_time = [%"PRId64"]\n", c->min_buffer_time);
1299  } else if (!av_strcasecmp(attr->name, (const char *)"suggestedPresentationDelay")) {
1300  c->suggested_presentation_delay = get_duration_insec(s, (const char *)val);
1301  av_log(s, AV_LOG_TRACE, "c->suggested_presentation_delay = [%"PRId64"]\n", c->suggested_presentation_delay);
1302  } else if (!av_strcasecmp(attr->name, (const char *)"mediaPresentationDuration")) {
1303  c->media_presentation_duration = get_duration_insec(s, (const char *)val);
1304  av_log(s, AV_LOG_TRACE, "c->media_presentation_duration = [%"PRId64"]\n", c->media_presentation_duration);
1305  }
1306  attr = attr->next;
1307  xmlFree(val);
1308  }
1309 
1310  tmp_node = find_child_node_by_name(node, "BaseURL");
1311  if (tmp_node) {
1312  mpd_baseurl_node = xmlCopyNode(tmp_node,1);
1313  } else {
1314  mpd_baseurl_node = xmlNewNode(NULL, "BaseURL");
1315  }
1316 
1317  // for now we can handle only one Period, the one with the longest duration
1318  node = xmlFirstElementChild(node);
1319  while (node) {
1320  if (!av_strcasecmp(node->name, (const char *)"Period")) {
1321  period_duration_sec = 0;
1322  period_start_sec = 0;
1323  attr = node->properties;
1324  while (attr) {
1325  val = xmlGetProp(node, attr->name);
1326  if (!av_strcasecmp(attr->name, (const char *)"duration")) {
1327  period_duration_sec = get_duration_insec(s, (const char *)val);
1328  } else if (!av_strcasecmp(attr->name, (const char *)"start")) {
1329  period_start_sec = get_duration_insec(s, (const char *)val);
1330  }
1331  attr = attr->next;
1332  xmlFree(val);
1333  }
1334  if ((period_duration_sec) >= (c->period_duration)) {
1335  period_node = node;
1336  c->period_duration = period_duration_sec;
1337  c->period_start = period_start_sec;
1338  if (c->period_start > 0)
1340  }
1341  } else if (!av_strcasecmp(node->name, "ProgramInformation")) {
1342  parse_programinformation(s, node);
1343  }
1344  node = xmlNextElementSibling(node);
1345  }
1346  if (!period_node) {
1347  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing Period node\n", url);
1348  ret = AVERROR_INVALIDDATA;
1349  goto cleanup;
1350  }
1351 
1352  adaptionset_node = xmlFirstElementChild(period_node);
1353  while (adaptionset_node) {
1354  if (!av_strcasecmp(adaptionset_node->name, (const char *)"BaseURL")) {
1355  period_baseurl_node = adaptionset_node;
1356  } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"SegmentTemplate")) {
1357  period_segmenttemplate_node = adaptionset_node;
1358  } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"SegmentList")) {
1359  period_segmentlist_node = adaptionset_node;
1360  } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"AdaptationSet")) {
1361  parse_manifest_adaptationset(s, url, adaptionset_node, mpd_baseurl_node, period_baseurl_node, period_segmenttemplate_node, period_segmentlist_node);
1362  }
1363  adaptionset_node = xmlNextElementSibling(adaptionset_node);
1364  }
1365 cleanup:
1366  /* free the document */
1367  xmlFreeDoc(doc);
1368  xmlCleanupParser();
1369  xmlFreeNode(mpd_baseurl_node);
1370  }
1371 
1372  av_free(new_url);
1373  av_free(buffer);
1374  if (close_in) {
1375  avio_close(in);
1376  }
1377  return ret;
1378 }
1379 
1380 static int64_t calc_cur_seg_no(AVFormatContext *s, struct representation *pls)
1381 {
1382  DASHContext *c = s->priv_data;
1383  int64_t num = 0;
1384  int64_t start_time_offset = 0;
1385 
1386  if (c->is_live) {
1387  if (pls->n_fragments) {
1388  av_log(s, AV_LOG_TRACE, "in n_fragments mode\n");
1389  num = pls->first_seq_no;
1390  } else if (pls->n_timelines) {
1391  av_log(s, AV_LOG_TRACE, "in n_timelines mode\n");
1392  start_time_offset = get_segment_start_time_based_on_timeline(pls, 0xFFFFFFFF) - 60 * pls->fragment_timescale; // 60 seconds before end
1393  num = calc_next_seg_no_from_timelines(pls, start_time_offset);
1394  if (num == -1)
1395  num = pls->first_seq_no;
1396  else
1397  num += pls->first_seq_no;
1398  } else if (pls->fragment_duration){
1399  av_log(s, AV_LOG_TRACE, "in fragment_duration mode fragment_timescale = %"PRId64", presentation_timeoffset = %"PRId64"\n", pls->fragment_timescale, pls->presentation_timeoffset);
1400  if (pls->presentation_timeoffset) {
1402  } else if (c->publish_time > 0 && !c->availability_start_time) {
1403  if (c->min_buffer_time) {
1405  } else {
1407  }
1408  } else {
1410  }
1411  }
1412  } else {
1413  num = pls->first_seq_no;
1414  }
1415  return num;
1416 }
1417 
1418 static int64_t calc_min_seg_no(AVFormatContext *s, struct representation *pls)
1419 {
1420  DASHContext *c = s->priv_data;
1421  int64_t num = 0;
1422 
1423  if (c->is_live && pls->fragment_duration) {
1424  av_log(s, AV_LOG_TRACE, "in live mode\n");
1426  } else {
1427  num = pls->first_seq_no;
1428  }
1429  return num;
1430 }
1431 
1432 static int64_t calc_max_seg_no(struct representation *pls, DASHContext *c)
1433 {
1434  int64_t num = 0;
1435 
1436  if (pls->n_fragments) {
1437  num = pls->first_seq_no + pls->n_fragments - 1;
1438  } else if (pls->n_timelines) {
1439  int i = 0;
1440  num = pls->first_seq_no + pls->n_timelines - 1;
1441  for (i = 0; i < pls->n_timelines; i++) {
1442  if (pls->timelines[i]->repeat == -1) {
1443  int length_of_each_segment = pls->timelines[i]->duration / pls->fragment_timescale;
1444  num = c->period_duration / length_of_each_segment;
1445  } else {
1446  num += pls->timelines[i]->repeat;
1447  }
1448  }
1449  } else if (c->is_live && pls->fragment_duration) {
1451  } else if (pls->fragment_duration) {
1453  }
1454 
1455  return num;
1456 }
1457 
1458 static void move_timelines(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1459 {
1460  if (rep_dest && rep_src ) {
1461  free_timelines_list(rep_dest);
1462  rep_dest->timelines = rep_src->timelines;
1463  rep_dest->n_timelines = rep_src->n_timelines;
1464  rep_dest->first_seq_no = rep_src->first_seq_no;
1465  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1466  rep_src->timelines = NULL;
1467  rep_src->n_timelines = 0;
1468  rep_dest->cur_seq_no = rep_src->cur_seq_no;
1469  }
1470 }
1471 
1472 static void move_segments(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1473 {
1474  if (rep_dest && rep_src ) {
1475  free_fragment_list(rep_dest);
1476  if (rep_src->start_number > (rep_dest->start_number + rep_dest->n_fragments))
1477  rep_dest->cur_seq_no = 0;
1478  else
1479  rep_dest->cur_seq_no += rep_src->start_number - rep_dest->start_number;
1480  rep_dest->fragments = rep_src->fragments;
1481  rep_dest->n_fragments = rep_src->n_fragments;
1482  rep_dest->parent = rep_src->parent;
1483  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1484  rep_src->fragments = NULL;
1485  rep_src->n_fragments = 0;
1486  }
1487 }
1488 
1489 
1491 {
1492  int ret = 0, i;
1493  DASHContext *c = s->priv_data;
1494  // save current context
1495  int n_videos = c->n_videos;
1496  struct representation **videos = c->videos;
1497  int n_audios = c->n_audios;
1498  struct representation **audios = c->audios;
1499  int n_subtitles = c->n_subtitles;
1500  struct representation **subtitles = c->subtitles;
1501  char *base_url = c->base_url;
1502 
1503  c->base_url = NULL;
1504  c->n_videos = 0;
1505  c->videos = NULL;
1506  c->n_audios = 0;
1507  c->audios = NULL;
1508  c->n_subtitles = 0;
1509  c->subtitles = NULL;
1510  ret = parse_manifest(s, s->url, NULL);
1511  if (ret)
1512  goto finish;
1513 
1514  if (c->n_videos != n_videos) {
1515  av_log(c, AV_LOG_ERROR,
1516  "new manifest has mismatched no. of video representations, %d -> %d\n",
1517  n_videos, c->n_videos);
1518  return AVERROR_INVALIDDATA;
1519  }
1520  if (c->n_audios != n_audios) {
1521  av_log(c, AV_LOG_ERROR,
1522  "new manifest has mismatched no. of audio representations, %d -> %d\n",
1523  n_audios, c->n_audios);
1524  return AVERROR_INVALIDDATA;
1525  }
1526  if (c->n_subtitles != n_subtitles) {
1527  av_log(c, AV_LOG_ERROR,
1528  "new manifest has mismatched no. of subtitles representations, %d -> %d\n",
1529  n_subtitles, c->n_subtitles);
1530  return AVERROR_INVALIDDATA;
1531  }
1532 
1533  for (i = 0; i < n_videos; i++) {
1534  struct representation *cur_video = videos[i];
1535  struct representation *ccur_video = c->videos[i];
1536  if (cur_video->timelines) {
1537  // calc current time
1538  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_video, cur_video->cur_seq_no) / cur_video->fragment_timescale;
1539  // update segments
1540  ccur_video->cur_seq_no = calc_next_seg_no_from_timelines(ccur_video, currentTime * cur_video->fragment_timescale - 1);
1541  if (ccur_video->cur_seq_no >= 0) {
1542  move_timelines(ccur_video, cur_video, c);
1543  }
1544  }
1545  if (cur_video->fragments) {
1546  move_segments(ccur_video, cur_video, c);
1547  }
1548  }
1549  for (i = 0; i < n_audios; i++) {
1550  struct representation *cur_audio = audios[i];
1551  struct representation *ccur_audio = c->audios[i];
1552  if (cur_audio->timelines) {
1553  // calc current time
1554  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_audio, cur_audio->cur_seq_no) / cur_audio->fragment_timescale;
1555  // update segments
1556  ccur_audio->cur_seq_no = calc_next_seg_no_from_timelines(ccur_audio, currentTime * cur_audio->fragment_timescale - 1);
1557  if (ccur_audio->cur_seq_no >= 0) {
1558  move_timelines(ccur_audio, cur_audio, c);
1559  }
1560  }
1561  if (cur_audio->fragments) {
1562  move_segments(ccur_audio, cur_audio, c);
1563  }
1564  }
1565 
1566 finish:
1567  // restore context
1568  if (c->base_url)
1569  av_free(base_url);
1570  else
1571  c->base_url = base_url;
1572 
1573  if (c->subtitles)
1574  free_subtitle_list(c);
1575  if (c->audios)
1576  free_audio_list(c);
1577  if (c->videos)
1578  free_video_list(c);
1579 
1580  c->n_subtitles = n_subtitles;
1581  c->subtitles = subtitles;
1582  c->n_audios = n_audios;
1583  c->audios = audios;
1584  c->n_videos = n_videos;
1585  c->videos = videos;
1586  return ret;
1587 }
1588 
1589 static struct fragment *get_current_fragment(struct representation *pls)
1590 {
1591  int64_t min_seq_no = 0;
1592  int64_t max_seq_no = 0;
1593  struct fragment *seg = NULL;
1594  struct fragment *seg_ptr = NULL;
1595  DASHContext *c = pls->parent->priv_data;
1596 
1597  while (( !ff_check_interrupt(c->interrupt_callback)&& pls->n_fragments > 0)) {
1598  if (pls->cur_seq_no < pls->n_fragments) {
1599  seg_ptr = pls->fragments[pls->cur_seq_no];
1600  seg = av_mallocz(sizeof(struct fragment));
1601  if (!seg) {
1602  return NULL;
1603  }
1604  seg->url = av_strdup(seg_ptr->url);
1605  if (!seg->url) {
1606  av_free(seg);
1607  return NULL;
1608  }
1609  seg->size = seg_ptr->size;
1610  seg->url_offset = seg_ptr->url_offset;
1611  return seg;
1612  } else if (c->is_live) {
1613  refresh_manifest(pls->parent);
1614  } else {
1615  break;
1616  }
1617  }
1618  if (c->is_live) {
1619  min_seq_no = calc_min_seg_no(pls->parent, pls);
1620  max_seq_no = calc_max_seg_no(pls, c);
1621 
1622  if (pls->timelines || pls->fragments) {
1623  refresh_manifest(pls->parent);
1624  }
1625  if (pls->cur_seq_no <= min_seq_no) {
1626  av_log(pls->parent, AV_LOG_VERBOSE, "old fragment: cur[%"PRId64"] min[%"PRId64"] max[%"PRId64"], playlist %d\n", (int64_t)pls->cur_seq_no, min_seq_no, max_seq_no, (int)pls->rep_idx);
1627  pls->cur_seq_no = calc_cur_seg_no(pls->parent, pls);
1628  } else if (pls->cur_seq_no > max_seq_no) {
1629  av_log(pls->parent, AV_LOG_VERBOSE, "new fragment: min[%"PRId64"] max[%"PRId64"], playlist %d\n", min_seq_no, max_seq_no, (int)pls->rep_idx);
1630  }
1631  seg = av_mallocz(sizeof(struct fragment));
1632  if (!seg) {
1633  return NULL;
1634  }
1635  } else if (pls->cur_seq_no <= pls->last_seq_no) {
1636  seg = av_mallocz(sizeof(struct fragment));
1637  if (!seg) {
1638  return NULL;
1639  }
1640  }
1641  if (seg) {
1642  char *tmpfilename= av_mallocz(c->max_url_size);
1643  if (!tmpfilename) {
1644  return NULL;
1645  }
1647  seg->url = av_strireplace(pls->url_template, pls->url_template, tmpfilename);
1648  if (!seg->url) {
1649  av_log(pls->parent, AV_LOG_WARNING, "Unable to resolve template url '%s', falling back to the original template\n", pls->url_template);
1650  seg->url = av_strdup(pls->url_template);
1651  if (!seg->url) {
1652  av_log(pls->parent, AV_LOG_ERROR, "Cannot resolve template url '%s'\n", pls->url_template);
1653  av_free(tmpfilename);
1654  return NULL;
1655  }
1656  }
1657  av_free(tmpfilename);
1658  seg->size = -1;
1659  }
1660 
1661  return seg;
1662 }
1663 
1664 static int read_from_url(struct representation *pls, struct fragment *seg,
1665  uint8_t *buf, int buf_size)
1666 {
1667  int ret;
1668 
1669  /* limit read if the fragment was only a part of a file */
1670  if (seg->size >= 0)
1671  buf_size = FFMIN(buf_size, pls->cur_seg_size - pls->cur_seg_offset);
1672 
1673  ret = avio_read(pls->input, buf, buf_size);
1674  if (ret > 0)
1675  pls->cur_seg_offset += ret;
1676 
1677  return ret;
1678 }
1679 
1680 static int open_input(DASHContext *c, struct representation *pls, struct fragment *seg)
1681 {
1682  AVDictionary *opts = NULL;
1683  char *url = NULL;
1684  int ret = 0;
1685 
1686  url = av_mallocz(c->max_url_size);
1687  if (!url) {
1688  ret = AVERROR(ENOMEM);
1689  goto cleanup;
1690  }
1691 
1692  if (seg->size >= 0) {
1693  /* try to restrict the HTTP request to the part we want
1694  * (if this is in fact an HTTP request) */
1695  av_dict_set_int(&opts, "offset", seg->url_offset, 0);
1696  av_dict_set_int(&opts, "end_offset", seg->url_offset + seg->size, 0);
1697  }
1698 
1699  ff_make_absolute_url(url, c->max_url_size, c->base_url, seg->url);
1700  av_log(pls->parent, AV_LOG_VERBOSE, "DASH request for url '%s', offset %"PRId64", playlist %d\n",
1701  url, seg->url_offset, pls->rep_idx);
1702  ret = open_url(pls->parent, &pls->input, url, c->avio_opts, opts, NULL);
1703 
1704 cleanup:
1705  av_free(url);
1706  av_dict_free(&opts);
1707  pls->cur_seg_offset = 0;
1708  pls->cur_seg_size = seg->size;
1709  return ret;
1710 }
1711 
1712 static int update_init_section(struct representation *pls)
1713 {
1714  static const int max_init_section_size = 1024 * 1024;
1715  DASHContext *c = pls->parent->priv_data;
1716  int64_t sec_size;
1717  int64_t urlsize;
1718  int ret;
1719 
1720  if (!pls->init_section || pls->init_sec_buf)
1721  return 0;
1722 
1723  ret = open_input(c, pls, pls->init_section);
1724  if (ret < 0) {
1726  "Failed to open an initialization section in playlist %d\n",
1727  pls->rep_idx);
1728  return ret;
1729  }
1730 
1731  if (pls->init_section->size >= 0)
1732  sec_size = pls->init_section->size;
1733  else if ((urlsize = avio_size(pls->input)) >= 0)
1734  sec_size = urlsize;
1735  else
1736  sec_size = max_init_section_size;
1737 
1738  av_log(pls->parent, AV_LOG_DEBUG,
1739  "Downloading an initialization section of size %"PRId64"\n",
1740  sec_size);
1741 
1742  sec_size = FFMIN(sec_size, max_init_section_size);
1743 
1744  av_fast_malloc(&pls->init_sec_buf, &pls->init_sec_buf_size, sec_size);
1745 
1746  ret = read_from_url(pls, pls->init_section, pls->init_sec_buf,
1747  pls->init_sec_buf_size);
1748  ff_format_io_close(pls->parent, &pls->input);
1749 
1750  if (ret < 0)
1751  return ret;
1752 
1753  pls->init_sec_data_len = ret;
1754  pls->init_sec_buf_read_offset = 0;
1755 
1756  return 0;
1757 }
1758 
1759 static int64_t seek_data(void *opaque, int64_t offset, int whence)
1760 {
1761  struct representation *v = opaque;
1762  if (v->n_fragments && !v->init_sec_data_len) {
1763  return avio_seek(v->input, offset, whence);
1764  }
1765 
1766  return AVERROR(ENOSYS);
1767 }
1768 
1769 static int read_data(void *opaque, uint8_t *buf, int buf_size)
1770 {
1771  int ret = 0;
1772  struct representation *v = opaque;
1773  DASHContext *c = v->parent->priv_data;
1774 
1775 restart:
1776  if (!v->input) {
1777  free_fragment(&v->cur_seg);
1778  v->cur_seg = get_current_fragment(v);
1779  if (!v->cur_seg) {
1780  ret = AVERROR_EOF;
1781  goto end;
1782  }
1783 
1784  /* load/update Media Initialization Section, if any */
1785  ret = update_init_section(v);
1786  if (ret)
1787  goto end;
1788 
1789  ret = open_input(c, v, v->cur_seg);
1790  if (ret < 0) {
1791  if (ff_check_interrupt(c->interrupt_callback)) {
1792  ret = AVERROR_EXIT;
1793  goto end;
1794  }
1795  av_log(v->parent, AV_LOG_WARNING, "Failed to open fragment of playlist %d\n", v->rep_idx);
1796  v->cur_seq_no++;
1797  goto restart;
1798  }
1799  }
1800 
1801  if (v->init_sec_buf_read_offset < v->init_sec_data_len) {
1802  /* Push init section out first before first actual fragment */
1803  int copy_size = FFMIN(v->init_sec_data_len - v->init_sec_buf_read_offset, buf_size);
1804  memcpy(buf, v->init_sec_buf, copy_size);
1805  v->init_sec_buf_read_offset += copy_size;
1806  ret = copy_size;
1807  goto end;
1808  }
1809 
1810  /* if v->cur_seg is NULL, fetch the current fragment and check the result again */
1811  if (!v->cur_seg) {
1812  v->cur_seg = get_current_fragment(v);
1813  }
1814  if (!v->cur_seg) {
1815  ret = AVERROR_EOF;
1816  goto end;
1817  }
1818  ret = read_from_url(v, v->cur_seg, buf, buf_size);
1819  if (ret > 0)
1820  goto end;
1821 
1822  if (c->is_live || v->cur_seq_no < v->last_seq_no) {
1823  if (!v->is_restart_needed)
1824  v->cur_seq_no++;
1825  v->is_restart_needed = 1;
1826  }
1827 
1828 end:
1829  return ret;
1830 }
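
read_data() is the pull callback behind each representation's private AVIOContext; dashdec wires it up via the internal ffio_init_context() on an embedded context. The public equivalent applications use is avio_alloc_context(); below is a minimal sketch of that pattern, with hypothetical names (my_source, my_read, alloc_custom_io) standing in for real reader state:

#include <libavformat/avio.h>
#include <libavutil/mem.h>

struct my_source { int dummy; /* real reader state would live here */ };

static int my_read(void *opaque, uint8_t *buf, int buf_size)
{
    (void)opaque; (void)buf; (void)buf_size;
    return AVERROR_EOF;   /* placeholder: real code fills buf and returns bytes read */
}

static AVIOContext *alloc_custom_io(struct my_source *src)
{
    unsigned char *buffer = av_malloc(INITIAL_BUFFER_SIZE);
    if (!buffer)
        return NULL;
    /* write_flag = 0 and no write/seek callback: a pure pull reader */
    return avio_alloc_context(buffer, INITIAL_BUFFER_SIZE, 0, src,
                              my_read, NULL, NULL);
}
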
1831 
1832 static int save_avio_options(AVFormatContext *s)
1833 {
1834  DASHContext *c = s->priv_data;
1835  const char *opts[] = {
1836  "headers", "user_agent", "cookies", "http_proxy", "referer", "rw_timeout", NULL };
1837  const char **opt = opts;
1838  uint8_t *buf = NULL;
1839  int ret = 0;
1840 
1841  while (*opt) {
1842  if (av_opt_get(s->pb, *opt, AV_OPT_SEARCH_CHILDREN, &buf) >= 0) {
1843  if (buf[0] != '\0') {
1844  ret = av_dict_set(&c->avio_opts, *opt, buf, AV_DICT_DONT_STRDUP_VAL);
1845  if (ret < 0) {
1846  av_freep(&buf);
1847  return ret;
1848  }
1849  } else {
1850  av_freep(&buf);
1851  }
1852  }
1853  opt++;
1854  }
1855 
1856  return ret;
1857 }
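
save_avio_options() snapshots a whitelist of I/O options from the manifest's AVIOContext so the same headers, cookies, proxy settings, etc. can be re-applied to every segment request. The core pattern, reduced to a single option and a hypothetical helper name (stash_io_option), looks roughly like this:

#include <libavformat/avformat.h>
#include <libavutil/dict.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>

/* copy one string option from the (possibly nested) I/O context into a dict */
static int stash_io_option(AVFormatContext *s, AVDictionary **dst, const char *name)
{
    uint8_t *val = NULL;
    int ret = 0;

    if (av_opt_get(s->pb, name, AV_OPT_SEARCH_CHILDREN, &val) >= 0 && val) {
        if (val[0])
            ret = av_dict_set(dst, name, val, AV_DICT_DONT_STRDUP_VAL); /* dict owns val now */
        else
            av_freep(&val);                                             /* empty value: discard */
    }
    return ret;
}
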
1858 
1859 static int nested_io_open(AVFormatContext *s, AVIOContext **pb, const char *url,
1860  int flags, AVDictionary **opts)
1861 {
1862  av_log(s, AV_LOG_ERROR,
1863  "A DASH playlist item '%s' referred to an external file '%s'. "
1864  "Opening this file was forbidden for security reasons\n",
1865  s->url, url);
1866  return AVERROR(EPERM);
1867 }
1868 
1869 static void close_demux_for_component(struct representation *pls)
1870 {
1871  /* note: the internal buffer could have changed */
1872  av_freep(&pls->pb.buffer);
1873  memset(&pls->pb, 0x00, sizeof(AVIOContext));
1874  pls->ctx->pb = NULL;
1875  avformat_close_input(&pls->ctx);
1876  pls->ctx = NULL;
1877 }
1878 
1879 static int reopen_demux_for_component(AVFormatContext *s, struct representation *pls)
1880 {
1881  DASHContext *c = s->priv_data;
1882  ff_const59 AVInputFormat *in_fmt = NULL;
1883  AVDictionary *in_fmt_opts = NULL;
1884  uint8_t *avio_ctx_buffer = NULL;
1885  int ret = 0, i;
1886 
1887  if (pls->ctx) {
1888  close_demux_for_component(pls);
1889  }
1890 
1891  if (ff_check_interrupt(&s->interrupt_callback)) {
1892  ret = AVERROR_EXIT;
1893  goto fail;
1894  }
1895 
1896  if (!(pls->ctx = avformat_alloc_context())) {
1897  ret = AVERROR(ENOMEM);
1898  goto fail;
1899  }
1900 
1901  avio_ctx_buffer = av_malloc(INITIAL_BUFFER_SIZE);
1902  if (!avio_ctx_buffer ) {
1903  ret = AVERROR(ENOMEM);
1904  avformat_free_context(pls->ctx);
1905  pls->ctx = NULL;
1906  goto fail;
1907  }
1908  if (c->is_live) {
1909  ffio_init_context(&pls->pb, avio_ctx_buffer , INITIAL_BUFFER_SIZE, 0, pls, read_data, NULL, NULL);
1910  } else {
1911  ffio_init_context(&pls->pb, avio_ctx_buffer , INITIAL_BUFFER_SIZE, 0, pls, read_data, NULL, seek_data);
1912  }
1913  pls->pb.seekable = 0;
1914 
1915  if ((ret = ff_copy_whiteblacklists(pls->ctx, s)) < 0)
1916  goto fail;
1917 
1918  pls->ctx->flags = AVFMT_FLAG_CUSTOM_IO;
1919  pls->ctx->probesize = 1024 * 4;
1921  ret = av_probe_input_buffer(&pls->pb, &in_fmt, "", NULL, 0, 0);
1922  if (ret < 0) {
1923  av_log(s, AV_LOG_ERROR, "Error when loading first fragment, playlist %d\n", (int)pls->rep_idx);
1924  avformat_free_context(pls->ctx);
1925  pls->ctx = NULL;
1926  goto fail;
1927  }
1928 
1929  pls->ctx->pb = &pls->pb;
1930  pls->ctx->io_open = nested_io_open;
1931 
1932  // provide additional information from mpd if available
1933  ret = avformat_open_input(&pls->ctx, "", in_fmt, &in_fmt_opts); //pls->init_section->url
1934  av_dict_free(&in_fmt_opts);
1935  if (ret < 0)
1936  goto fail;
1937  if (pls->n_fragments) {
1938 #if FF_API_R_FRAME_RATE
1939  if (pls->framerate.den) {
1940  for (i = 0; i < pls->ctx->nb_streams; i++)
1941  pls->ctx->streams[i]->r_frame_rate = pls->framerate;
1942  }
1943 #endif
1944  ret = avformat_find_stream_info(pls->ctx, NULL);
1945  if (ret < 0)
1946  goto fail;
1947  }
1948 
1949 fail:
1950  return ret;
1951 }
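
reopen_demux_for_component() follows the standard custom-I/O pattern: probe the byte stream produced by the representation's AVIOContext, then open a nested demuxer on it with AVFMT_FLAG_CUSTOM_IO so libavformat does not try to close I/O it does not own. A stripped-down sketch of the same pattern using only public API (open_nested_demuxer is a hypothetical name; pb would come from something like the avio_alloc_context() sketch above):

#include <libavformat/avformat.h>

static int open_nested_demuxer(AVFormatContext **out, AVIOContext *pb)
{
    AVFormatContext *ctx = avformat_alloc_context();
    int ret;

    if (!ctx)
        return AVERROR(ENOMEM);
    ctx->pb     = pb;                     /* our own I/O context */
    ctx->flags |= AVFMT_FLAG_CUSTOM_IO;   /* don't let lavf close it */
    ret = avformat_open_input(&ctx, "", NULL, NULL); /* probe the format from the stream */
    if (ret < 0)
        return ret;                       /* ctx is freed by lavf on failure */
    *out = ctx;
    return 0;
}
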
1952 
1953 static int open_demux_for_component(AVFormatContext *s, struct representation *pls)
1954 {
1955  int ret = 0;
1956  int i;
1957 
1958  pls->parent = s;
1959  pls->cur_seq_no = calc_cur_seg_no(s, pls);
1960 
1961  if (!pls->last_seq_no) {
1962  pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
1963  }
1964 
1965  ret = reopen_demux_for_component(s, pls);
1966  if (ret < 0) {
1967  goto fail;
1968  }
1969  for (i = 0; i < pls->ctx->nb_streams; i++) {
1970  AVStream *st = avformat_new_stream(s, NULL);
1971  AVStream *ist = pls->ctx->streams[i];
1972  if (!st) {
1973  ret = AVERROR(ENOMEM);
1974  goto fail;
1975  }
1976  st->id = i;
1977  avcodec_parameters_copy(st->codecpar, ist->codecpar);
1978  avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
1979  }
1980 
1981  return 0;
1982 fail:
1983  return ret;
1984 }
1985 
1986 static int is_common_init_section_exist(struct representation **pls, int n_pls)
1987 {
1988  struct fragment *first_init_section = pls[0]->init_section;
1989  char *url =NULL;
1990  int64_t url_offset = -1;
1991  int64_t size = -1;
1992  int i = 0;
1993 
1994  if (first_init_section == NULL || n_pls == 0)
1995  return 0;
1996 
1997  url = first_init_section->url;
1998  url_offset = first_init_section->url_offset;
1999  size = pls[0]->init_section->size;
2000  for (i=0;i<n_pls;i++) {
2001  if (av_strcasecmp(pls[i]->init_section->url,url) || pls[i]->init_section->url_offset != url_offset || pls[i]->init_section->size != size) {
2002  return 0;
2003  }
2004  }
2005  return 1;
2006 }
2007 
2008 static int copy_init_section(struct representation *rep_dest, struct representation *rep_src)
2009 {
2010  rep_dest->init_sec_buf = av_mallocz(rep_src->init_sec_buf_size);
2011  if (!rep_dest->init_sec_buf) {
2012  av_log(rep_dest->ctx, AV_LOG_WARNING, "Cannot alloc memory for init_sec_buf\n");
2013  return AVERROR(ENOMEM);
2014  }
2015  memcpy(rep_dest->init_sec_buf, rep_src->init_sec_buf, rep_src->init_sec_data_len);
2016  rep_dest->init_sec_buf_size = rep_src->init_sec_buf_size;
2017  rep_dest->init_sec_data_len = rep_src->init_sec_data_len;
2018  rep_dest->cur_timestamp = rep_src->cur_timestamp;
2019 
2020  return 0;
2021 }
2022 
2023 
2024 static int dash_read_header(AVFormatContext *s)
2025 {
2026  DASHContext *c = s->priv_data;
2027  struct representation *rep;
2028  int ret = 0;
2029  int stream_index = 0;
2030  int i;
2031 
2032  c->interrupt_callback = &s->interrupt_callback;
2033 
2034  if ((ret = save_avio_options(s)) < 0)
2035  goto fail;
2036 
2037  if ((ret = parse_manifest(s, s->url, s->pb)) < 0)
2038  goto fail;
2039 
2040  /* If this isn't a live stream, fill the total duration of the
2041  * stream. */
2042  if (!c->is_live) {
2043  s->duration = (int64_t) c->media_presentation_duration * AV_TIME_BASE;
2044  } else {
2045  av_dict_set(&c->avio_opts, "seekable", "0", 0);
2046  }
2047 
2048  if(c->n_videos)
2049  c->is_init_section_common_video = is_common_init_section_exist(c->videos, c->n_videos);
2050 
2051  /* Open the demuxer for video and audio components if available */
2052  for (i = 0; i < c->n_videos; i++) {
2053  rep = c->videos[i];
2054  if (i > 0 && c->is_init_section_common_video) {
2055  ret = copy_init_section(rep, c->videos[0]);
2056  if (ret < 0)
2057  goto fail;
2058  }
2059  ret = open_demux_for_component(s, rep);
2060 
2061  if (ret)
2062  goto fail;
2063  rep->stream_index = stream_index;
2064  ++stream_index;
2065  }
2066 
2067  if(c->n_audios)
2068  c->is_init_section_common_audio = is_common_init_section_exist(c->audios, c->n_audios);
2069 
2070  for (i = 0; i < c->n_audios; i++) {
2071  rep = c->audios[i];
2072  if (i > 0 && c->is_init_section_common_audio) {
2073  ret = copy_init_section(rep, c->audios[0]);
2074  if (ret < 0)
2075  goto fail;
2076  }
2077  ret = open_demux_for_component(s, rep);
2078 
2079  if (ret)
2080  goto fail;
2081  rep->stream_index = stream_index;
2082  ++stream_index;
2083  }
2084 
2085  if (c->n_subtitles)
2086  c->is_init_section_common_audio = is_common_init_section_exist(c->subtitles, c->n_subtitles);
2087 
2088  for (i = 0; i < c->n_subtitles; i++) {
2089  rep = c->subtitles[i];
2090  if (i > 0 && c->is_init_section_common_audio) {
2091  ret = copy_init_section(rep, c->subtitles[0]);
2092  if (ret < 0)
2093  goto fail;
2094  }
2095  ret = open_demux_for_component(s, rep);
2096 
2097  if (ret)
2098  goto fail;
2099  rep->stream_index = stream_index;
2100  ++stream_index;
2101  }
2102 
2103  if (!stream_index) {
2104  ret = AVERROR_INVALIDDATA;
2105  goto fail;
2106  }
2107 
2108  /* Create a program */
2109  if (!ret) {
2110  AVProgram *program;
2111  program = av_new_program(s, 0);
2112  if (!program) {
2113  goto fail;
2114  }
2115 
2116  for (i = 0; i < c->n_videos; i++) {
2117  rep = c->videos[i];
2118  av_program_add_stream_index(s, 0, rep->stream_index);
2119  rep->assoc_stream = s->streams[rep->stream_index];
2120  if (rep->bandwidth > 0)
2121  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2122  if (rep->id[0])
2123  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2124  }
2125  for (i = 0; i < c->n_audios; i++) {
2126  rep = c->audios[i];
2127  av_program_add_stream_index(s, 0, rep->stream_index);
2128  rep->assoc_stream = s->streams[rep->stream_index];
2129  if (rep->bandwidth > 0)
2130  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2131  if (rep->id[0])
2132  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2133  }
2134  for (i = 0; i < c->n_subtitles; i++) {
2135  rep = c->subtitles[i];
2136  av_program_add_stream_index(s, 0, rep->stream_index);
2137  rep->assoc_stream = s->streams[rep->stream_index];
2138  if (rep->id[0])
2139  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2140  }
2141  }
2142 
2143  return 0;
2144 fail:
2145  return ret;
2146 }
2147 
2148 static void recheck_discard_flags(AVFormatContext *s, struct representation **p, int n)
2149 {
2150  int i, j;
2151 
2152  for (i = 0; i < n; i++) {
2153  struct representation *pls = p[i];
2154  int needed = !pls->assoc_stream || pls->assoc_stream->discard < AVDISCARD_ALL;
2155 
2156  if (needed && !pls->ctx) {
2157  pls->cur_seg_offset = 0;
2158  pls->init_sec_buf_read_offset = 0;
2159  /* Catch up */
2160  for (j = 0; j < n; j++) {
2161  pls->cur_seq_no = FFMAX(pls->cur_seq_no, p[j]->cur_seq_no);
2162  }
2164  av_log(s, AV_LOG_INFO, "Now receiving stream_index %d\n", pls->stream_index);
2165  } else if (!needed && pls->ctx) {
2166  close_demux_for_component(pls);
2167  if (pls->input)
2168  ff_format_io_close(pls->parent, &pls->input);
2169  av_log(s, AV_LOG_INFO, "No longer receiving stream_index %d\n", pls->stream_index);
2170  }
2171  }
2172 }
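
recheck_discard_flags() is how the demuxer honors stream selection: a representation is only kept open while its exported AVStream is not fully discarded. From the application side, switching variants is therefore just a matter of flipping AVStream.discard; an illustrative sketch (select_only is a hypothetical helper, fmt an already-opened context):

#include <libavformat/avformat.h>

/* keep only stream 'idx'; the dash demuxer closes the other representations on
 * the next read and reopens them if they are re-enabled later */
static void select_only(AVFormatContext *fmt, unsigned idx)
{
    for (unsigned i = 0; i < fmt->nb_streams; i++)
        fmt->streams[i]->discard = (i == idx) ? AVDISCARD_DEFAULT : AVDISCARD_ALL;
}
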
2173 
2174 static int dash_read_packet(AVFormatContext *s, AVPacket *pkt)
2175 {
2176  DASHContext *c = s->priv_data;
2177  int ret = 0, i;
2178  int64_t mints = 0;
2179  struct representation *cur = NULL;
2180  struct representation *rep = NULL;
2181 
2182  recheck_discard_flags(s, c->videos, c->n_videos);
2183  recheck_discard_flags(s, c->audios, c->n_audios);
2184  recheck_discard_flags(s, c->subtitles, c->n_subtitles);
2185 
2186  for (i = 0; i < c->n_videos; i++) {
2187  rep = c->videos[i];
2188  if (!rep->ctx)
2189  continue;
2190  if (!cur || rep->cur_timestamp < mints) {
2191  cur = rep;
2192  mints = rep->cur_timestamp;
2193  }
2194  }
2195  for (i = 0; i < c->n_audios; i++) {
2196  rep = c->audios[i];
2197  if (!rep->ctx)
2198  continue;
2199  if (!cur || rep->cur_timestamp < mints) {
2200  cur = rep;
2201  mints = rep->cur_timestamp;
2202  }
2203  }
2204 
2205  for (i = 0; i < c->n_subtitles; i++) {
2206  rep = c->subtitles[i];
2207  if (!rep->ctx)
2208  continue;
2209  if (!cur || rep->cur_timestamp < mints) {
2210  cur = rep;
2211  mints = rep->cur_timestamp;
2212  }
2213  }
2214 
2215  if (!cur) {
2216  return AVERROR_INVALIDDATA;
2217  }
2218  while (!ff_check_interrupt(c->interrupt_callback) && !ret) {
2219  ret = av_read_frame(cur->ctx, pkt);
2220  if (ret >= 0) {
2221  /* If we got a packet, return it */
2222  cur->cur_timestamp = av_rescale(pkt->pts, (int64_t)cur->ctx->streams[0]->time_base.num * 90000, cur->ctx->streams[0]->time_base.den);
2223  pkt->stream_index = cur->stream_index;
2224  return 0;
2225  }
2226  if (cur->is_restart_needed) {
2227  cur->cur_seg_offset = 0;
2228  cur->init_sec_buf_read_offset = 0;
2229  if (cur->input)
2230  ff_format_io_close(cur->parent, &cur->input);
2231  ret = reopen_demux_for_component(s, cur);
2232  cur->is_restart_needed = 0;
2233  }
2234  }
2235  return AVERROR_EOF;
2236 }
2237 
2238 static int dash_close(AVFormatContext *s)
2239 {
2240  DASHContext *c = s->priv_data;
2241  free_audio_list(c);
2242  free_video_list(c);
2243  av_dict_free(&c->avio_opts);
2244  av_freep(&c->base_url);
2245  return 0;
2246 }
2247 
2248 static int dash_seek(AVFormatContext *s, struct representation *pls, int64_t seek_pos_msec, int flags, int dry_run)
2249 {
2250  int ret = 0;
2251  int i = 0;
2252  int j = 0;
2253  int64_t duration = 0;
2254 
2255  av_log(pls->parent, AV_LOG_VERBOSE, "DASH seek pos[%"PRId64"ms], playlist %d%s\n",
2256  seek_pos_msec, pls->rep_idx, dry_run ? " (dry)" : "");
2257 
2258  // single fragment mode
2259  if (pls->n_fragments == 1) {
2260  pls->cur_timestamp = 0;
2261  pls->cur_seg_offset = 0;
2262  if (dry_run)
2263  return 0;
2264  ff_read_frame_flush(pls->ctx);
2265  return av_seek_frame(pls->ctx, -1, seek_pos_msec * 1000, flags);
2266  }
2267 
2268  if (pls->input)
2269  ff_format_io_close(pls->parent, &pls->input);
2270 
2271  // find the nearest fragment
2272  if (pls->n_timelines > 0 && pls->fragment_timescale > 0) {
2273  int64_t num = pls->first_seq_no;
2274  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline start n_timelines[%d] "
2275  "last_seq_no[%"PRId64"], playlist %d.\n",
2276  (int)pls->n_timelines, (int64_t)pls->last_seq_no, (int)pls->rep_idx);
2277  for (i = 0; i < pls->n_timelines; i++) {
2278  if (pls->timelines[i]->starttime > 0) {
2279  duration = pls->timelines[i]->starttime;
2280  }
2281  duration += pls->timelines[i]->duration;
2282  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2283  goto set_seq_num;
2284  }
2285  for (j = 0; j < pls->timelines[i]->repeat; j++) {
2286  duration += pls->timelines[i]->duration;
2287  num++;
2288  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2289  goto set_seq_num;
2290  }
2291  }
2292  num++;
2293  }
2294 
2295 set_seq_num:
2296  pls->cur_seq_no = num > pls->last_seq_no ? pls->last_seq_no : num;
2297  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline end cur_seq_no[%"PRId64"], playlist %d.\n",
2298  (int64_t)pls->cur_seq_no, (int)pls->rep_idx);
2299  } else if (pls->fragment_duration > 0) {
2300  pls->cur_seq_no = pls->first_seq_no + ((seek_pos_msec * pls->fragment_timescale) / pls->fragment_duration) / 1000;
2301  } else {
2302  av_log(pls->parent, AV_LOG_ERROR, "dash_seek missing timeline or fragment_duration\n");
2303  pls->cur_seq_no = pls->first_seq_no;
2304  }
2305  pls->cur_timestamp = 0;
2306  pls->cur_seg_offset = 0;
2307  pls->init_sec_buf_read_offset = 0;
2308  ret = dry_run ? 0 : reopen_demux_for_component(s, pls);
2309 
2310  return ret;
2311 }
2312 
2313 static int dash_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2314 {
2315  int ret = 0, i;
2316  DASHContext *c = s->priv_data;
2317  int64_t seek_pos_msec = av_rescale_rnd(timestamp, 1000,
2318  s->streams[stream_index]->time_base.den,
2319  flags & AVSEEK_FLAG_BACKWARD ?
2320  AV_ROUND_DOWN : AV_ROUND_UP);
2321  if ((flags & AVSEEK_FLAG_BYTE) || c->is_live)
2322  return AVERROR(ENOSYS);
2323 
2324  /* Seek in discarded streams with dry_run=1 to avoid reopening them */
2325  for (i = 0; i < c->n_videos; i++) {
2326  if (!ret)
2327  ret = dash_seek(s, c->videos[i], seek_pos_msec, flags, !c->videos[i]->ctx);
2328  }
2329  for (i = 0; i < c->n_audios; i++) {
2330  if (!ret)
2331  ret = dash_seek(s, c->audios[i], seek_pos_msec, flags, !c->audios[i]->ctx);
2332  }
2333  for (i = 0; i < c->n_subtitles; i++) {
2334  if (!ret)
2335  ret = dash_seek(s, c->subtitles[i], seek_pos_msec, flags, !c->subtitles[i]->ctx);
2336  }
2337 
2338  return ret;
2339 }
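
dash_read_seek() converts the incoming timestamp (expressed in the selected stream's time base) to milliseconds and repositions every representation, skipping byte seeks and live streams. From an application the usual entry point is av_seek_frame(); a sketch that seeks to an absolute time in seconds (seek_to_seconds is a hypothetical helper; fmt and stream_index are assumed valid):

#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>

static int seek_to_seconds(AVFormatContext *fmt, int stream_index, double seconds)
{
    /* convert seconds into the stream's own time base */
    int64_t ts = av_rescale_q((int64_t)(seconds * AV_TIME_BASE),
                              AV_TIME_BASE_Q,
                              fmt->streams[stream_index]->time_base);
    return av_seek_frame(fmt, stream_index, ts, AVSEEK_FLAG_BACKWARD);
}
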
2340 
2341 static int dash_probe(const AVProbeData *p)
2342 {
2343  if (!av_stristr(p->buf, "<MPD"))
2344  return 0;
2345 
2346  if (av_stristr(p->buf, "dash:profile:isoff-on-demand:2011") ||
2347  av_stristr(p->buf, "dash:profile:isoff-live:2011") ||
2348  av_stristr(p->buf, "dash:profile:isoff-live:2012") ||
2349  av_stristr(p->buf, "dash:profile:isoff-main:2011")) {
2350  return AVPROBE_SCORE_MAX;
2351  }
2352  if (av_stristr(p->buf, "dash:profile")) {
2353  return AVPROBE_SCORE_MAX;
2354  }
2355 
2356  return 0;
2357 }
2358 
2359 #define OFFSET(x) offsetof(DASHContext, x)
2360 #define FLAGS AV_OPT_FLAG_DECODING_PARAM
2361 static const AVOption dash_options[] = {
2362  {"allowed_extensions", "List of file extensions that dash is allowed to access",
2363  OFFSET(allowed_extensions), AV_OPT_TYPE_STRING,
2364  {.str = "aac,m4a,m4s,m4v,mov,mp4,webm"},
2365  INT_MIN, INT_MAX, FLAGS},
2366  {NULL}
2367 };
2368 
2369 static const AVClass dash_class = {
2370  .class_name = "dash",
2371  .item_name = av_default_item_name,
2372  .option = dash_options,
2373  .version = LIBAVUTIL_VERSION_INT,
2374 };
2375 
2377  .name = "dash",
2378  .long_name = NULL_IF_CONFIG_SMALL("Dynamic Adaptive Streaming over HTTP"),
2379  .priv_class = &dash_class,
2380  .priv_data_size = sizeof(DASHContext),
2381  .read_probe = dash_probe,
2382  .read_header = dash_read_header,
2383  .read_packet = dash_read_packet,
2384  .read_close = dash_close,
2385  .read_seek = dash_read_seek,
2386  .flags = AVFMT_NO_BYTE_SEEK,
2387 };
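
End to end, the demuxer above is selected automatically for inputs that probe as an MPD; its only private option, allowed_extensions, is passed through the options dictionary of avformat_open_input(). An illustrative open/read skeleton (not part of dashdec.c; read_mpd is a hypothetical helper and the caller supplies the manifest URL):

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int read_mpd(const char *mpd_url)
{
    AVFormatContext *fmt = NULL;
    AVDictionary *opts = NULL;
    AVPacket pkt;
    int ret;

    /* widen the default allowed_extensions list, e.g. to also permit .ts segments */
    av_dict_set(&opts, "allowed_extensions", "aac,m4a,m4s,m4v,mov,mp4,webm,ts", 0);

    ret = avformat_open_input(&fmt, mpd_url, NULL, &opts);
    av_dict_free(&opts);
    if (ret < 0)
        return ret;

    ret = avformat_find_stream_info(fmt, NULL);
    while (ret >= 0 && av_read_frame(fmt, &pkt) >= 0)
        av_packet_unref(&pkt);            /* a real caller would process pkt here */

    avformat_close_input(&fmt);
    return ret < 0 ? ret : 0;
}
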