FFmpeg
dashdec.c
1 /*
2  * Dynamic Adaptive Streaming over HTTP demux
3  * Copyright (c) 2017 samsamsam@o2.pl based on HLS demux
4  * Copyright (c) 2017 Steven Liu
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 #include <libxml/parser.h>
23 #include "libavutil/intreadwrite.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/time.h"
26 #include "libavutil/parseutils.h"
27 #include "internal.h"
28 #include "avio_internal.h"
29 #include "dash.h"
30 
31 #define INITIAL_BUFFER_SIZE 32768
32 
33 struct fragment {
34  int64_t url_offset;
35  int64_t size;
36  char *url;
37 };
38 
39 /*
40  * Reference: ISO_IEC_23009-1-DASH-2012
41  * Section: 5.3.9.6.2
42  * Table 17 — Semantics of SegmentTimeline element
43  * */
44 struct timeline {
45  /* starttime: Element or Attribute Name
46  * specifies the MPD start time, in @timescale units, at which
47  * the first Segment in the series starts relative to the beginning of the Period.
48  * The value of this attribute must be equal to or greater than the sum of the previous S
49  * element's earliest presentation time and the sum of the contiguous Segment durations.
50  * If the value of the attribute is greater than what is expressed by the previous S element,
51  * it expresses discontinuities in the timeline.
52  * If not present then the value shall be assumed to be zero for the first S element
53  * and for the subsequent S elements, the value shall be assumed to be the sum of
54  * the previous S element's earliest presentation time and contiguous duration
55  * (i.e. previous S@starttime + @duration * (@repeat + 1)).
56  * */
57  int64_t starttime;
58  /* repeat: Element or Attribute Name
59  * specifies the repeat count of the number of following contiguous Segments with
60  * the same duration expressed by the value of @duration. This value is zero-based
61  * (e.g. a value of three means four Segments in the contiguous series).
62  * */
63  int64_t repeat;
64  /* duration: Element or Attribute Name
65  * specifies the Segment duration, in units of the value of the @timescale.
66  * */
67  int64_t duration;
68 };
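/*
 * Illustrative example (hypothetical manifest values, not taken from any real
 * stream): a timeline such as
 *
 *     <SegmentTimeline>
 *         <S t="0" d="5000" r="2"/>
 *         <S d="4000"/>
 *     </SegmentTimeline>
 *
 * with timescale="2500" is stored as two struct timeline entries,
 * {starttime=0, duration=5000, repeat=2} and {starttime=0, duration=4000,
 * repeat=0}, and describes four segments starting at 0, 5000, 10000 and
 * 15000 timescale units, i.e. at 0 s, 2 s, 4 s and 6 s within the Period.
 */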
69 
70 /*
71  * Each playlist has its own demuxer. If it is currently active,
72  * it has an opened AVIOContext too, and potentially an AVPacket
73  * containing the next packet from this stream.
74  */
75 struct representation {
76  char *url_template;
77  AVIOContext pb;
78  AVIOContext *input;
79  AVFormatContext *parent;
80  AVFormatContext *ctx;
81  AVPacket pkt;
82  int rep_idx;
83  int rep_count;
84  int stream_index;
85 
86  enum AVMediaType type;
87  char id[20];
88  int bandwidth;
89  AVRational framerate;
90  AVStream *assoc_stream; /* demuxer stream associated with this representation */
91 
92  int n_fragments;
93  struct fragment **fragments; /* VOD list of fragments for this representation */
94 
95  int n_timelines;
96  struct timeline **timelines;
97 
98  int64_t first_seq_no;
99  int64_t last_seq_no;
100  int64_t start_number; /* used with a dynamic segment list to know which segments are new */
101 
102  int64_t fragment_duration;
103  int64_t fragment_timescale;
104 
105  int64_t presentation_timeoffset;
106 
107  int64_t cur_seq_no;
108  int64_t cur_seg_offset;
109  int64_t cur_seg_size;
110  struct fragment *cur_seg;
111 
112  /* Currently active Media Initialization Section */
113  struct fragment *init_section;
114  uint8_t *init_sec_buf;
115  uint32_t init_sec_buf_size;
116  uint32_t init_sec_data_len;
117  uint32_t init_sec_buf_read_offset;
118  int64_t cur_timestamp;
119  int is_restart_needed;
120 };
121 
122 typedef struct DASHContext {
123  const AVClass *class;
124  char *base_url;
125  char *adaptionset_contenttype_val;
126  char *adaptionset_par_val;
127  char *adaptionset_lang_val;
128  char *adaptionset_minbw_val;
129  char *adaptionset_maxbw_val;
130  char *adaptionset_minwidth_val;
131  char *adaptionset_maxwidth_val;
132  char *adaptionset_minheight_val;
133  char *adaptionset_maxheight_val;
134  char *adaptionset_minframerate_val;
135  char *adaptionset_maxframerate_val;
136  char *adaptionset_segmentalignment_val;
137  char *adaptionset_bitstreamswitching_val;
138 
139  int n_videos;
140  struct representation **videos;
141  int n_audios;
142  struct representation **audios;
143  int n_subtitles;
144  struct representation **subtitles;
145 
146  /* MediaPresentationDescription Attribute */
147  uint64_t media_presentation_duration;
148  uint64_t suggested_presentation_delay;
149  uint64_t availability_start_time;
150  uint64_t availability_end_time;
151  uint64_t publish_time;
152  uint64_t minimum_update_period;
153  uint64_t time_shift_buffer_depth;
154  uint64_t min_buffer_time;
155 
156  /* Period Attribute */
157  uint64_t period_duration;
158  uint64_t period_start;
159 
160  int is_live;
161  AVIOInterruptCB *interrupt_callback;
162  char *allowed_extensions;
163  AVDictionary *avio_opts;
164  int max_url_size;
165 
166  /* Flags for init section */
167  int is_init_section_common_video;
168  int is_init_section_common_audio;
169 
170 } DASHContext;
171 
172 static int ishttp(char *url)
173 {
174  const char *proto_name = avio_find_protocol_name(url);
175  return av_strstart(proto_name, "http", NULL);
176 }
177 
178 static int aligned(int val)
179 {
180  return ((val + 0x3F) >> 6) << 6;
181 }
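/*
 * aligned() rounds a byte count up to the next multiple of 64, e.g.
 * aligned(0) == 0, aligned(64) == 64, aligned(100) == 128.  It is used below
 * when growing max_url_size so URL buffers are allocated with a little slack.
 */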
182 
183 static uint64_t get_current_time_in_sec(void)
184 {
185  return av_gettime() / 1000000;
186 }
187 
188 static uint64_t get_utc_date_time_insec(AVFormatContext *s, const char *datetime)
189 {
190  struct tm timeinfo;
191  int year = 0;
192  int month = 0;
193  int day = 0;
194  int hour = 0;
195  int minute = 0;
196  int ret = 0;
197  float second = 0.0;
198 
199  /* ISO-8601 date parser */
200  if (!datetime)
201  return 0;
202 
203  ret = sscanf(datetime, "%d-%d-%dT%d:%d:%fZ", &year, &month, &day, &hour, &minute, &second);
204  /* expect all 6 fields: year, month, day, hour, minute, second */
205  if (ret != 6) {
206  av_log(s, AV_LOG_WARNING, "get_utc_date_time_insec: invalid ISO-8601 datetime format\n");
207  }
208  timeinfo.tm_year = year - 1900;
209  timeinfo.tm_mon = month - 1;
210  timeinfo.tm_mday = day;
211  timeinfo.tm_hour = hour;
212  timeinfo.tm_min = minute;
213  timeinfo.tm_sec = (int)second;
214 
215  return av_timegm(&timeinfo);
216 }
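/*
 * Example: an attribute such as availabilityStartTime="2017-10-17T09:35:00Z"
 * parses into year=2017, month=10, day=17, hour=9, minute=35, second=0.0,
 * which av_timegm() converts to seconds since the Unix epoch (UTC).
 * Fractional seconds are truncated by the (int) cast above.
 */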
217 
218 static uint32_t get_duration_insec(AVFormatContext *s, const char *duration)
219 {
220  /* ISO-8601 duration parser */
221  uint32_t days = 0;
222  uint32_t hours = 0;
223  uint32_t mins = 0;
224  uint32_t secs = 0;
225  int size = 0;
226  float value = 0;
227  char type = '\0';
228  const char *ptr = duration;
229 
230  while (*ptr) {
231  if (*ptr == 'P' || *ptr == 'T') {
232  ptr++;
233  continue;
234  }
235 
236  if (sscanf(ptr, "%f%c%n", &value, &type, &size) != 2) {
237  av_log(s, AV_LOG_WARNING, "get_duration_insec: invalid ISO-8601 duration format\n");
238  return 0; /* parser error */
239  }
240  switch (type) {
241  case 'D':
242  days = (uint32_t)value;
243  break;
244  case 'H':
245  hours = (uint32_t)value;
246  break;
247  case 'M':
248  mins = (uint32_t)value;
249  break;
250  case 'S':
251  secs = (uint32_t)value;
252  break;
253  default:
254  // handle invalid type
255  break;
256  }
257  ptr += size;
258  }
259  return ((days * 24 + hours) * 60 + mins) * 60 + secs;
260 }
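/*
 * Examples of the ISO-8601 durations handled above (illustrative values):
 *   "PT10M30.5S" -> mins=10, secs=30 (fraction truncated) -> 630 seconds
 *   "P1DT2H"     -> days=1, hours=2                       -> 93600 seconds
 * Designators other than D, H, M and S (e.g. Y) fall through the default case
 * and are ignored; note that an 'M' before the 'T' (months) is also parsed as
 * minutes here.
 */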
261 
262 static int64_t get_segment_start_time_based_on_timeline(struct representation *pls, int64_t cur_seq_no)
263 {
264  int64_t start_time = 0;
265  int64_t i = 0;
266  int64_t j = 0;
267  int64_t num = 0;
268 
269  if (pls->n_timelines) {
270  for (i = 0; i < pls->n_timelines; i++) {
271  if (pls->timelines[i]->starttime > 0) {
272  start_time = pls->timelines[i]->starttime;
273  }
274  if (num == cur_seq_no)
275  goto finish;
276 
277  start_time += pls->timelines[i]->duration;
278 
279  if (pls->timelines[i]->repeat == -1) {
280  start_time = pls->timelines[i]->duration * cur_seq_no;
281  goto finish;
282  }
283 
284  for (j = 0; j < pls->timelines[i]->repeat; j++) {
285  num++;
286  if (num == cur_seq_no)
287  goto finish;
288  start_time += pls->timelines[i]->duration;
289  }
290  num++;
291  }
292  }
293 finish:
294  return start_time;
295 }
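/*
 * Worked example (hypothetical timeline): with the entries
 *   [0] {starttime=0,   duration=100, repeat=1}
 *   [1] {starttime=300, duration=50,  repeat=0}
 * the function returns 0 for cur_seq_no 0, 100 for 1, 300 for 2 (the explicit
 * starttime overrides the accumulated 200, i.e. a discontinuity) and 350 for 3.
 * A repeat of -1 ("repeat until the end of the Period") short-circuits to
 * duration * cur_seq_no.
 */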
296 
297 static int64_t calc_next_seg_no_from_timelines(struct representation *pls, int64_t cur_time)
298 {
299  int64_t i = 0;
300  int64_t j = 0;
301  int64_t num = 0;
302  int64_t start_time = 0;
303 
304  for (i = 0; i < pls->n_timelines; i++) {
305  if (pls->timelines[i]->starttime > 0) {
306  start_time = pls->timelines[i]->starttime;
307  }
308  if (start_time > cur_time)
309  goto finish;
310 
311  start_time += pls->timelines[i]->duration;
312  for (j = 0; j < pls->timelines[i]->repeat; j++) {
313  num++;
314  if (start_time > cur_time)
315  goto finish;
316  start_time += pls->timelines[i]->duration;
317  }
318  num++;
319  }
320 
321  return -1;
322 
323 finish:
324  return num;
325 }
326 
327 static void free_fragment(struct fragment **seg)
328 {
329  if (!(*seg)) {
330  return;
331  }
332  av_freep(&(*seg)->url);
333  av_freep(seg);
334 }
335 
336 static void free_fragment_list(struct representation *pls)
337 {
338  int i;
339 
340  for (i = 0; i < pls->n_fragments; i++) {
341  free_fragment(&pls->fragments[i]);
342  }
343  av_freep(&pls->fragments);
344  pls->n_fragments = 0;
345 }
346 
347 static void free_timelines_list(struct representation *pls)
348 {
349  int i;
350 
351  for (i = 0; i < pls->n_timelines; i++) {
352  av_freep(&pls->timelines[i]);
353  }
354  av_freep(&pls->timelines);
355  pls->n_timelines = 0;
356 }
357 
358 static void free_representation(struct representation *pls)
359 {
360  free_fragment_list(pls);
361  free_timelines_list(pls);
362  free_fragment(&pls->cur_seg);
363  free_fragment(&pls->init_section);
364  av_freep(&pls->init_sec_buf);
365  av_freep(&pls->pb.buffer);
366  ff_format_io_close(pls->parent, &pls->input);
367  if (pls->ctx) {
368  pls->ctx->pb = NULL;
369  avformat_close_input(&pls->ctx);
370  }
371 
372  av_freep(&pls->url_template);
373  av_freep(&pls);
374 }
375 
376 static void free_video_list(DASHContext *c)
377 {
378  int i;
379  for (i = 0; i < c->n_videos; i++) {
380  struct representation *pls = c->videos[i];
381  free_representation(pls);
382  }
383  av_freep(&c->videos);
384  c->n_videos = 0;
385 }
386 
387 static void free_audio_list(DASHContext *c)
388 {
389  int i;
390  for (i = 0; i < c->n_audios; i++) {
391  struct representation *pls = c->audios[i];
392  free_representation(pls);
393  }
394  av_freep(&c->audios);
395  c->n_audios = 0;
396 }
397 
398 static void free_subtitle_list(DASHContext *c)
399 {
400  int i;
401  for (i = 0; i < c->n_subtitles; i++) {
402  struct representation *pls = c->subtitles[i];
403  free_representation(pls);
404  }
405  av_freep(&c->subtitles);
406  c->n_subtitles = 0;
407 }
408 
409 static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
410  AVDictionary *opts, AVDictionary *opts2, int *is_http)
411 {
412  DASHContext *c = s->priv_data;
413  AVDictionary *tmp = NULL;
414  const char *proto_name = NULL;
415  int ret;
416 
417  av_dict_copy(&tmp, opts, 0);
418  av_dict_copy(&tmp, opts2, 0);
419 
420  if (av_strstart(url, "crypto", NULL)) {
421  if (url[6] == '+' || url[6] == ':')
422  proto_name = avio_find_protocol_name(url + 7);
423  }
424 
425  if (!proto_name)
426  proto_name = avio_find_protocol_name(url);
427 
428  if (!proto_name)
429  return AVERROR_INVALIDDATA;
430 
431  // only http(s) & file are allowed
432  if (av_strstart(proto_name, "file", NULL)) {
433  if (strcmp(c->allowed_extensions, "ALL") && !av_match_ext(url, c->allowed_extensions)) {
434  av_log(s, AV_LOG_ERROR,
435  "Filename extension of \'%s\' is not a common multimedia extension, blocked for security reasons.\n"
436  "If you wish to override this, adjust allowed_extensions; set it to \'ALL\' to allow all extensions.\n",
437  url);
438  return AVERROR_INVALIDDATA;
439  }
440  } else if (av_strstart(proto_name, "http", NULL)) {
441  ;
442  } else
443  return AVERROR_INVALIDDATA;
444 
445  if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':')
446  ;
447  else if (av_strstart(url, "crypto", NULL) && !strncmp(proto_name, url + 7, strlen(proto_name)) && url[7 + strlen(proto_name)] == ':')
448  ;
449  else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
450  return AVERROR_INVALIDDATA;
451 
452  av_freep(pb);
453  ret = avio_open2(pb, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);
454  if (ret >= 0) {
455  // update cookies on http response with setcookies.
456  char *new_cookies = NULL;
457 
458  if (!(s->flags & AVFMT_FLAG_CUSTOM_IO))
459  av_opt_get(*pb, "cookies", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&new_cookies);
460 
461  if (new_cookies) {
462  av_dict_set(&opts, "cookies", new_cookies, AV_DICT_DONT_STRDUP_VAL);
463  }
464 
465  }
466 
467  av_dict_free(&tmp);
468 
469  if (is_http)
470  *is_http = av_strstart(proto_name, "http", NULL);
471 
472  return ret;
473 }
474 
475 static char *get_content_url(xmlNodePtr *baseurl_nodes,
476  int n_baseurl_nodes,
477  int max_url_size,
478  char *rep_id_val,
479  char *rep_bandwidth_val,
480  char *val)
481 {
482  int i;
483  char *text;
484  char *url = NULL;
485  char *tmp_str = av_mallocz(max_url_size);
486  char *tmp_str_2 = av_mallocz(max_url_size);
487 
488  if (!tmp_str || !tmp_str_2) {
489  return NULL;
490  }
491 
492  for (i = 0; i < n_baseurl_nodes; ++i) {
493  if (baseurl_nodes[i] &&
494  baseurl_nodes[i]->children &&
495  baseurl_nodes[i]->children->type == XML_TEXT_NODE) {
496  text = xmlNodeGetContent(baseurl_nodes[i]->children);
497  if (text) {
498  memset(tmp_str, 0, max_url_size);
499  memset(tmp_str_2, 0, max_url_size);
500  ff_make_absolute_url(tmp_str_2, max_url_size, tmp_str, text);
501  av_strlcpy(tmp_str, tmp_str_2, max_url_size);
502  xmlFree(text);
503  }
504  }
505  }
506 
507  if (val)
508  ff_make_absolute_url(tmp_str, max_url_size, tmp_str, val);
509 
510  if (rep_id_val) {
511  url = av_strireplace(tmp_str, "$RepresentationID$", (const char*)rep_id_val);
512  if (!url) {
513  goto end;
514  }
515  av_strlcpy(tmp_str, url, max_url_size);
516  }
517  if (rep_bandwidth_val && tmp_str[0] != '\0') {
518  // free any previously assigned url before reassigning
519  av_free(url);
520  url = av_strireplace(tmp_str, "$Bandwidth$", (const char*)rep_bandwidth_val);
521  if (!url) {
522  goto end;
523  }
524  }
525 end:
526  av_free(tmp_str);
527  av_free(tmp_str_2);
528  return url;
529 }
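/*
 * Example of the substitution above (hypothetical values): with a resolved
 * BaseURL of "http://example.com/dash/", val = "$RepresentationID$/init.mp4",
 * rep_id_val = "video1" and rep_bandwidth_val = "500000", the returned URL is
 * "http://example.com/dash/video1/init.mp4"; a "$Bandwidth$" placeholder, if
 * present, would be replaced by "500000" in the same way.
 */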
530 
531 static char *get_val_from_nodes_tab(xmlNodePtr *nodes, const int n_nodes, const char *attrname)
532 {
533  int i;
534  char *val;
535 
536  for (i = 0; i < n_nodes; ++i) {
537  if (nodes[i]) {
538  val = xmlGetProp(nodes[i], attrname);
539  if (val)
540  return val;
541  }
542  }
543 
544  return NULL;
545 }
546 
547 static xmlNodePtr find_child_node_by_name(xmlNodePtr rootnode, const char *nodename)
548 {
549  xmlNodePtr node = rootnode;
550  if (!node) {
551  return NULL;
552  }
553 
554  node = xmlFirstElementChild(node);
555  while (node) {
556  if (!av_strcasecmp(node->name, nodename)) {
557  return node;
558  }
559  node = xmlNextElementSibling(node);
560  }
561  return NULL;
562 }
563 
564 static enum AVMediaType get_content_type(xmlNodePtr node)
565 {
566  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
567  int i = 0;
568  const char *attr;
569  char *val = NULL;
570 
571  if (node) {
572  for (i = 0; i < 2; i++) {
573  attr = i ? "mimeType" : "contentType";
574  val = xmlGetProp(node, attr);
575  if (val) {
576  if (av_stristr((const char *)val, "video")) {
577  type = AVMEDIA_TYPE_VIDEO;
578  } else if (av_stristr((const char *)val, "audio")) {
579  type = AVMEDIA_TYPE_AUDIO;
580  } else if (av_stristr((const char *)val, "text")) {
581  type = AVMEDIA_TYPE_SUBTITLE;
582  }
583  xmlFree(val);
584  }
585  }
586  }
587  return type;
588 }
589 
590 static struct fragment * get_Fragment(char *range)
591 {
592  struct fragment * seg = av_mallocz(sizeof(struct fragment));
593 
594  if (!seg)
595  return NULL;
596 
597  seg->size = -1;
598  if (range) {
599  char *str_end_offset;
600  char *str_offset = av_strtok(range, "-", &str_end_offset);
601  seg->url_offset = strtoll(str_offset, NULL, 10);
602  seg->size = strtoll(str_end_offset, NULL, 10) - seg->url_offset;
603  }
604 
605  return seg;
606 }
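/*
 * Example: a range attribute of "500-999" yields url_offset = 500 and
 * size = 999 - 500 = 499 as computed above; without a range attribute the
 * size stays -1, which open_input() treats as "read until EOF".
 */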
607 
608 static int parse_manifest_segmenturlnode(AVFormatContext *s, struct representation *rep,
609  xmlNodePtr fragmenturl_node,
610  xmlNodePtr *baseurl_nodes,
611  char *rep_id_val,
612  char *rep_bandwidth_val)
613 {
614  DASHContext *c = s->priv_data;
615  char *initialization_val = NULL;
616  char *media_val = NULL;
617  char *range_val = NULL;
618  int max_url_size = c ? c->max_url_size: MAX_URL_SIZE;
619 
620  if (!av_strcasecmp(fragmenturl_node->name, (const char *)"Initialization")) {
621  initialization_val = xmlGetProp(fragmenturl_node, "sourceURL");
622  range_val = xmlGetProp(fragmenturl_node, "range");
623  if (initialization_val || range_val) {
624  rep->init_section = get_Fragment(range_val);
625  if (!rep->init_section) {
626  xmlFree(initialization_val);
627  xmlFree(range_val);
628  return AVERROR(ENOMEM);
629  }
630  rep->init_section->url = get_content_url(baseurl_nodes, 4,
631  max_url_size,
632  rep_id_val,
633  rep_bandwidth_val,
634  initialization_val);
635 
636  if (!rep->init_section->url) {
637  av_free(rep->init_section);
638  xmlFree(initialization_val);
639  xmlFree(range_val);
640  return AVERROR(ENOMEM);
641  }
642  xmlFree(initialization_val);
643  xmlFree(range_val);
644  }
645  } else if (!av_strcasecmp(fragmenturl_node->name, (const char *)"SegmentURL")) {
646  media_val = xmlGetProp(fragmenturl_node, "media");
647  range_val = xmlGetProp(fragmenturl_node, "mediaRange");
648  if (media_val || range_val) {
649  struct fragment *seg = get_Fragment(range_val);
650  if (!seg) {
651  xmlFree(media_val);
652  xmlFree(range_val);
653  return AVERROR(ENOMEM);
654  }
655  seg->url = get_content_url(baseurl_nodes, 4,
656  max_url_size,
657  rep_id_val,
658  rep_bandwidth_val,
659  media_val);
660  if (!seg->url) {
661  av_free(seg);
662  xmlFree(media_val);
663  xmlFree(range_val);
664  return AVERROR(ENOMEM);
665  }
666  dynarray_add(&rep->fragments, &rep->n_fragments, seg);
667  xmlFree(media_val);
668  xmlFree(range_val);
669  }
670  }
671 
672  return 0;
673 }
674 
675 static int parse_manifest_segmenttimeline(AVFormatContext *s, struct representation *rep,
676  xmlNodePtr fragment_timeline_node)
677 {
678  xmlAttrPtr attr = NULL;
679  char *val = NULL;
680 
681  if (!av_strcasecmp(fragment_timeline_node->name, (const char *)"S")) {
682  struct timeline *tml = av_mallocz(sizeof(struct timeline));
683  if (!tml) {
684  return AVERROR(ENOMEM);
685  }
686  attr = fragment_timeline_node->properties;
687  while (attr) {
688  val = xmlGetProp(fragment_timeline_node, attr->name);
689 
690  if (!val) {
691  av_log(s, AV_LOG_WARNING, "parse_manifest_segmenttimeline attr->name = %s val is NULL\n", attr->name);
692  continue;
693  }
694 
695  if (!av_strcasecmp(attr->name, (const char *)"t")) {
696  tml->starttime = (int64_t)strtoll(val, NULL, 10);
697  } else if (!av_strcasecmp(attr->name, (const char *)"r")) {
698  tml->repeat = (int64_t) strtoll(val, NULL, 10);
699  } else if (!av_strcasecmp(attr->name, (const char *)"d")) {
700  tml->duration = (int64_t)strtoll(val, NULL, 10);
701  }
702  attr = attr->next;
703  xmlFree(val);
704  }
705  dynarray_add(&rep->timelines, &rep->n_timelines, tml);
706  }
707 
708  return 0;
709 }
710 
711 static int resolve_content_path(AVFormatContext *s, const char *url, int *max_url_size, xmlNodePtr *baseurl_nodes, int n_baseurl_nodes)
712 {
713  char *tmp_str = NULL;
714  char *path = NULL;
715  char *mpdName = NULL;
716  xmlNodePtr node = NULL;
717  char *baseurl = NULL;
718  char *root_url = NULL;
719  char *text = NULL;
720  char *tmp = NULL;
721  int isRootHttp = 0;
722  char token ='/';
723  int start = 0;
724  int rootId = 0;
725  int updated = 0;
726  int size = 0;
727  int i;
728  int tmp_max_url_size = strlen(url);
729 
730  for (i = n_baseurl_nodes-1; i >= 0 ; i--) {
731  text = xmlNodeGetContent(baseurl_nodes[i]);
732  if (!text)
733  continue;
734  tmp_max_url_size += strlen(text);
735  if (ishttp(text)) {
736  xmlFree(text);
737  break;
738  }
739  xmlFree(text);
740  }
741 
742  tmp_max_url_size = aligned(tmp_max_url_size);
743  text = av_mallocz(tmp_max_url_size);
744  if (!text) {
745  updated = AVERROR(ENOMEM);
746  goto end;
747  }
748  av_strlcpy(text, url, strlen(url)+1);
749  tmp = text;
750  while (mpdName = av_strtok(tmp, "/", &tmp)) {
751  size = strlen(mpdName);
752  }
753  av_free(text);
754 
755  path = av_mallocz(tmp_max_url_size);
756  tmp_str = av_mallocz(tmp_max_url_size);
757  if (!tmp_str || !path) {
758  updated = AVERROR(ENOMEM);
759  goto end;
760  }
761 
762  av_strlcpy (path, url, strlen(url) - size + 1);
763  for (rootId = n_baseurl_nodes - 1; rootId > 0; rootId --) {
764  if (!(node = baseurl_nodes[rootId])) {
765  continue;
766  }
767  text = xmlNodeGetContent(node);
768  if (ishttp(text)) {
769  xmlFree(text);
770  break;
771  }
772  xmlFree(text);
773  }
774 
775  node = baseurl_nodes[rootId];
776  baseurl = xmlNodeGetContent(node);
777  root_url = (av_strcasecmp(baseurl, "")) ? baseurl : path;
778  if (node) {
779  xmlNodeSetContent(node, root_url);
780  updated = 1;
781  }
782 
783  size = strlen(root_url);
784  isRootHttp = ishttp(root_url);
785 
786  if (root_url[size - 1] != token) {
787  av_strlcat(root_url, "/", size + 2);
788  size += 2;
789  }
790 
791  for (i = 0; i < n_baseurl_nodes; ++i) {
792  if (i == rootId) {
793  continue;
794  }
795  text = xmlNodeGetContent(baseurl_nodes[i]);
796  if (text && !av_strstart(text, "/", NULL)) {
797  memset(tmp_str, 0, strlen(tmp_str));
798  if (!ishttp(text) && isRootHttp) {
799  av_strlcpy(tmp_str, root_url, size + 1);
800  }
801  start = (text[0] == token);
802  if (start && av_stristr(tmp_str, text)) {
803  char *p = tmp_str;
804  if (!av_strncasecmp(tmp_str, "http://", 7)) {
805  p += 7;
806  } else if (!av_strncasecmp(tmp_str, "https://", 8)) {
807  p += 8;
808  }
809  p = strchr(p, '/');
810  memset(p + 1, 0, strlen(p));
811  }
812  av_strlcat(tmp_str, text + start, tmp_max_url_size);
813  xmlNodeSetContent(baseurl_nodes[i], tmp_str);
814  updated = 1;
815  xmlFree(text);
816  }
817  }
818 
819 end:
820  if (tmp_max_url_size > *max_url_size) {
821  *max_url_size = tmp_max_url_size;
822  }
823  av_free(path);
824  av_free(tmp_str);
825  xmlFree(baseurl);
826  return updated;
827 
828 }
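/*
 * Example (hypothetical): for an MPD loaded from
 * "http://example.com/live/manifest.mpd" that carries a relative
 * <BaseURL>media/</BaseURL>, the node content is rewritten to
 * "http://example.com/live/media/"; the root is either the first absolute
 * http(s) BaseURL found (searching from the innermost level) or, failing
 * that, the directory part of the manifest URL itself.
 */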
829 
830 static int parse_manifest_representation(AVFormatContext *s, const char *url,
831  xmlNodePtr node,
832  xmlNodePtr adaptionset_node,
833  xmlNodePtr mpd_baseurl_node,
834  xmlNodePtr period_baseurl_node,
835  xmlNodePtr period_segmenttemplate_node,
836  xmlNodePtr period_segmentlist_node,
837  xmlNodePtr fragment_template_node,
838  xmlNodePtr content_component_node,
839  xmlNodePtr adaptionset_baseurl_node,
840  xmlNodePtr adaptionset_segmentlist_node,
841  xmlNodePtr adaptionset_supplementalproperty_node)
842 {
843  int32_t ret = 0;
844  int32_t subtitle_rep_idx = 0;
845  int32_t audio_rep_idx = 0;
846  int32_t video_rep_idx = 0;
847  DASHContext *c = s->priv_data;
848  struct representation *rep = NULL;
849  struct fragment *seg = NULL;
850  xmlNodePtr representation_segmenttemplate_node = NULL;
851  xmlNodePtr representation_baseurl_node = NULL;
852  xmlNodePtr representation_segmentlist_node = NULL;
853  xmlNodePtr segmentlists_tab[3];
854  xmlNodePtr fragment_timeline_node = NULL;
855  xmlNodePtr fragment_templates_tab[5];
856  char *duration_val = NULL;
857  char *presentation_timeoffset_val = NULL;
858  char *startnumber_val = NULL;
859  char *timescale_val = NULL;
860  char *initialization_val = NULL;
861  char *media_val = NULL;
862  char *val = NULL;
863  xmlNodePtr baseurl_nodes[4];
864  xmlNodePtr representation_node = node;
865  char *rep_id_val = xmlGetProp(representation_node, "id");
866  char *rep_bandwidth_val = xmlGetProp(representation_node, "bandwidth");
867  char *rep_framerate_val = xmlGetProp(representation_node, "frameRate");
868  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
869 
870  // try to get the media type from the Representation node
871  if (type == AVMEDIA_TYPE_UNKNOWN)
872  type = get_content_type(representation_node);
873  // try to get the media type from the ContentComponent node
874  if (type == AVMEDIA_TYPE_UNKNOWN)
875  type = get_content_type(content_component_node);
876  // try to get the media type from the AdaptationSet node
877  if (type == AVMEDIA_TYPE_UNKNOWN)
878  type = get_content_type(adaptionset_node);
879  if (type == AVMEDIA_TYPE_UNKNOWN) {
880  av_log(s, AV_LOG_VERBOSE, "Parsing '%s' - skipping unsupported representation type\n", url);
881  } else if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO || type == AVMEDIA_TYPE_SUBTITLE) {
882  // convert selected representation to our internal struct
883  rep = av_mallocz(sizeof(struct representation));
884  if (!rep) {
885  ret = AVERROR(ENOMEM);
886  goto end;
887  }
888  rep->parent = s;
889  representation_segmenttemplate_node = find_child_node_by_name(representation_node, "SegmentTemplate");
890  representation_baseurl_node = find_child_node_by_name(representation_node, "BaseURL");
891  representation_segmentlist_node = find_child_node_by_name(representation_node, "SegmentList");
892 
893  baseurl_nodes[0] = mpd_baseurl_node;
894  baseurl_nodes[1] = period_baseurl_node;
895  baseurl_nodes[2] = adaptionset_baseurl_node;
896  baseurl_nodes[3] = representation_baseurl_node;
897 
898  ret = resolve_content_path(s, url, &c->max_url_size, baseurl_nodes, 4);
899  c->max_url_size = aligned(c->max_url_size
900  + (rep_id_val ? strlen(rep_id_val) : 0)
901  + (rep_bandwidth_val ? strlen(rep_bandwidth_val) : 0));
902  if (ret == AVERROR(ENOMEM) || ret == 0) {
903  goto end;
904  }
905  if (representation_segmenttemplate_node || fragment_template_node || period_segmenttemplate_node) {
906  fragment_timeline_node = NULL;
907  fragment_templates_tab[0] = representation_segmenttemplate_node;
908  fragment_templates_tab[1] = adaptionset_segmentlist_node;
909  fragment_templates_tab[2] = fragment_template_node;
910  fragment_templates_tab[3] = period_segmenttemplate_node;
911  fragment_templates_tab[4] = period_segmentlist_node;
912 
913  presentation_timeoffset_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "presentationTimeOffset");
914  duration_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "duration");
915  startnumber_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "startNumber");
916  timescale_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "timescale");
917  initialization_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "initialization");
918  media_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "media");
919 
920  if (initialization_val) {
921  rep->init_section = av_mallocz(sizeof(struct fragment));
922  if (!rep->init_section) {
923  av_free(rep);
924  ret = AVERROR(ENOMEM);
925  goto end;
926  }
927  c->max_url_size = aligned(c->max_url_size + strlen(initialization_val));
928  rep->init_section->url = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, initialization_val);
929  if (!rep->init_section->url) {
930  av_free(rep->init_section);
931  av_free(rep);
932  ret = AVERROR(ENOMEM);
933  goto end;
934  }
935  rep->init_section->size = -1;
936  xmlFree(initialization_val);
937  }
938 
939  if (media_val) {
940  c->max_url_size = aligned(c->max_url_size + strlen(media_val));
941  rep->url_template = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, media_val);
942  xmlFree(media_val);
943  }
944 
945  if (presentation_timeoffset_val) {
946  rep->presentation_timeoffset = (int64_t) strtoll(presentation_timeoffset_val, NULL, 10);
947  av_log(s, AV_LOG_TRACE, "rep->presentation_timeoffset = [%"PRId64"]\n", rep->presentation_timeoffset);
948  xmlFree(presentation_timeoffset_val);
949  }
950  if (duration_val) {
951  rep->fragment_duration = (int64_t) strtoll(duration_val, NULL, 10);
952  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
953  xmlFree(duration_val);
954  }
955  if (timescale_val) {
956  rep->fragment_timescale = (int64_t) strtoll(timescale_val, NULL, 10);
957  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
958  xmlFree(timescale_val);
959  }
960  if (startnumber_val) {
961  rep->start_number = rep->first_seq_no = (int64_t) strtoll(startnumber_val, NULL, 10);
962  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
963  xmlFree(startnumber_val);
964  }
965  if (adaptionset_supplementalproperty_node) {
966  if (!av_strcasecmp(xmlGetProp(adaptionset_supplementalproperty_node,"schemeIdUri"), "http://dashif.org/guidelines/last-segment-number")) {
967  val = xmlGetProp(adaptionset_supplementalproperty_node,"value");
968  if (!val) {
969  av_log(s, AV_LOG_ERROR, "Missing value attribute in adaptionset_supplementalproperty_node\n");
970  } else {
971  rep->last_seq_no = (int64_t) strtoll(val, NULL, 10) - 1;
972  xmlFree(val);
973  }
974  }
975  }
976 
977  fragment_timeline_node = find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
978 
979  if (!fragment_timeline_node)
980  fragment_timeline_node = find_child_node_by_name(fragment_template_node, "SegmentTimeline");
981  if (!fragment_timeline_node)
982  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
983  if (!fragment_timeline_node)
984  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
985  if (fragment_timeline_node) {
986  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
987  while (fragment_timeline_node) {
988  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
989  if (ret < 0) {
990  return ret;
991  }
992  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
993  }
994  }
995  } else if (representation_baseurl_node && !representation_segmentlist_node) {
996  seg = av_mallocz(sizeof(struct fragment));
997  if (!seg) {
998  ret = AVERROR(ENOMEM);
999  goto end;
1000  }
1001  seg->url = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, NULL);
1002  if (!seg->url) {
1003  av_free(seg);
1004  ret = AVERROR(ENOMEM);
1005  goto end;
1006  }
1007  seg->size = -1;
1008  dynarray_add(&rep->fragments, &rep->n_fragments, seg);
1009  } else if (representation_segmentlist_node) {
1010  // TODO: https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html
1011  // http://www-itec.uni-klu.ac.at/dash/ddash/mpdGenerator.php?fragmentlength=15&type=full
1012  xmlNodePtr fragmenturl_node = NULL;
1013  segmentlists_tab[0] = representation_segmentlist_node;
1014  segmentlists_tab[1] = adaptionset_segmentlist_node;
1015  segmentlists_tab[2] = period_segmentlist_node;
1016 
1017  duration_val = get_val_from_nodes_tab(segmentlists_tab, 3, "duration");
1018  timescale_val = get_val_from_nodes_tab(segmentlists_tab, 3, "timescale");
1019  startnumber_val = get_val_from_nodes_tab(segmentlists_tab, 3, "startNumber");
1020  if (duration_val) {
1021  rep->fragment_duration = (int64_t) strtoll(duration_val, NULL, 10);
1022  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
1023  xmlFree(duration_val);
1024  }
1025  if (timescale_val) {
1026  rep->fragment_timescale = (int64_t) strtoll(timescale_val, NULL, 10);
1027  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
1028  xmlFree(timescale_val);
1029  }
1030  if (startnumber_val) {
1031  rep->start_number = rep->first_seq_no = (int64_t) strtoll(startnumber_val, NULL, 10);
1032  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
1033  xmlFree(startnumber_val);
1034  }
1035 
1036  fragmenturl_node = xmlFirstElementChild(representation_segmentlist_node);
1037  while (fragmenturl_node) {
1038  ret = parse_manifest_segmenturlnode(s, rep, fragmenturl_node,
1039  baseurl_nodes,
1040  rep_id_val,
1041  rep_bandwidth_val);
1042  if (ret < 0) {
1043  return ret;
1044  }
1045  fragmenturl_node = xmlNextElementSibling(fragmenturl_node);
1046  }
1047 
1048  fragment_timeline_node = find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
1049 
1050  if (!fragment_timeline_node)
1051  fragment_timeline_node = find_child_node_by_name(fragment_template_node, "SegmentTimeline");
1052  if (!fragment_timeline_node)
1053  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
1054  if (!fragment_timeline_node)
1055  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
1056  if (fragment_timeline_node) {
1057  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
1058  while (fragment_timeline_node) {
1059  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
1060  if (ret < 0) {
1061  return ret;
1062  }
1063  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
1064  }
1065  }
1066  } else {
1067  free_representation(rep);
1068  rep = NULL;
1069  av_log(s, AV_LOG_ERROR, "Unknown format of Representation node id[%s] \n", (const char *)rep_id_val);
1070  }
1071 
1072  if (rep) {
1073  if (rep->fragment_duration > 0 && !rep->fragment_timescale)
1074  rep->fragment_timescale = 1;
1075  rep->bandwidth = rep_bandwidth_val ? atoi(rep_bandwidth_val) : 0;
1076  av_strlcpy(rep->id, rep_id_val ? rep_id_val : "", sizeof(rep->id));
1077  rep->framerate = av_make_q(0, 0);
1078  if (type == AVMEDIA_TYPE_VIDEO && rep_framerate_val) {
1079  ret = av_parse_video_rate(&rep->framerate, rep_framerate_val);
1080  if (ret < 0)
1081  av_log(s, AV_LOG_VERBOSE, "Ignoring invalid frame rate '%s'\n", rep_framerate_val);
1082  }
1083 
1084  switch (type) {
1085  case AVMEDIA_TYPE_VIDEO:
1086  rep->rep_idx = video_rep_idx;
1087  dynarray_add(&c->videos, &c->n_videos, rep);
1088  break;
1089  case AVMEDIA_TYPE_AUDIO:
1090  rep->rep_idx = audio_rep_idx;
1091  dynarray_add(&c->audios, &c->n_audios, rep);
1092  break;
1093  case AVMEDIA_TYPE_SUBTITLE:
1094  rep->rep_idx = subtitle_rep_idx;
1095  dynarray_add(&c->subtitles, &c->n_subtitles, rep);
1096  break;
1097  default:
1098  av_log(s, AV_LOG_WARNING, "Unsupported stream type %d\n", type);
1099  break;
1100  }
1101  }
1102  }
1103 
1104  video_rep_idx += type == AVMEDIA_TYPE_VIDEO;
1105  audio_rep_idx += type == AVMEDIA_TYPE_AUDIO;
1106  subtitle_rep_idx += type == AVMEDIA_TYPE_SUBTITLE;
1107 
1108 end:
1109  if (rep_id_val)
1110  xmlFree(rep_id_val);
1111  if (rep_bandwidth_val)
1112  xmlFree(rep_bandwidth_val);
1113  if (rep_framerate_val)
1114  xmlFree(rep_framerate_val);
1115 
1116  return ret;
1117 }
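/*
 * The function above handles the three segment addressing modes of a
 * Representation: a SegmentTemplate (URL template, optionally with a
 * SegmentTimeline), a single BaseURL (one fragment of unknown size covering
 * the whole stream), or an explicit SegmentList of Initialization/SegmentURL
 * entries; anything else is reported as an unknown Representation format.
 */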
1118 
1119 static int parse_manifest_adaptationset(AVFormatContext *s, const char *url,
1120  xmlNodePtr adaptionset_node,
1121  xmlNodePtr mpd_baseurl_node,
1122  xmlNodePtr period_baseurl_node,
1123  xmlNodePtr period_segmenttemplate_node,
1124  xmlNodePtr period_segmentlist_node)
1125 {
1126  int ret = 0;
1127  DASHContext *c = s->priv_data;
1128  xmlNodePtr fragment_template_node = NULL;
1129  xmlNodePtr content_component_node = NULL;
1130  xmlNodePtr adaptionset_baseurl_node = NULL;
1131  xmlNodePtr adaptionset_segmentlist_node = NULL;
1132  xmlNodePtr adaptionset_supplementalproperty_node = NULL;
1133  xmlNodePtr node = NULL;
1134  c->adaptionset_contenttype_val = xmlGetProp(adaptionset_node, "contentType");
1135  c->adaptionset_par_val = xmlGetProp(adaptionset_node, "par");
1136  c->adaptionset_lang_val = xmlGetProp(adaptionset_node, "lang");
1137  c->adaptionset_minbw_val = xmlGetProp(adaptionset_node, "minBandwidth");
1138  c->adaptionset_maxbw_val = xmlGetProp(adaptionset_node, "maxBandwidth");
1139  c->adaptionset_minwidth_val = xmlGetProp(adaptionset_node, "minWidth");
1140  c->adaptionset_maxwidth_val = xmlGetProp(adaptionset_node, "maxWidth");
1141  c->adaptionset_minheight_val = xmlGetProp(adaptionset_node, "minHeight");
1142  c->adaptionset_maxheight_val = xmlGetProp(adaptionset_node, "maxHeight");
1143  c->adaptionset_minframerate_val = xmlGetProp(adaptionset_node, "minFrameRate");
1144  c->adaptionset_maxframerate_val = xmlGetProp(adaptionset_node, "maxFrameRate");
1145  c->adaptionset_segmentalignment_val = xmlGetProp(adaptionset_node, "segmentAlignment");
1146  c->adaptionset_bitstreamswitching_val = xmlGetProp(adaptionset_node, "bitstreamSwitching");
1147 
1148  node = xmlFirstElementChild(adaptionset_node);
1149  while (node) {
1150  if (!av_strcasecmp(node->name, (const char *)"SegmentTemplate")) {
1151  fragment_template_node = node;
1152  } else if (!av_strcasecmp(node->name, (const char *)"ContentComponent")) {
1153  content_component_node = node;
1154  } else if (!av_strcasecmp(node->name, (const char *)"BaseURL")) {
1155  adaptionset_baseurl_node = node;
1156  } else if (!av_strcasecmp(node->name, (const char *)"SegmentList")) {
1157  adaptionset_segmentlist_node = node;
1158  } else if (!av_strcasecmp(node->name, (const char *)"SupplementalProperty")) {
1159  adaptionset_supplementalproperty_node = node;
1160  } else if (!av_strcasecmp(node->name, (const char *)"Representation")) {
1161  ret = parse_manifest_representation(s, url, node,
1162  adaptionset_node,
1163  mpd_baseurl_node,
1164  period_baseurl_node,
1165  period_segmenttemplate_node,
1166  period_segmentlist_node,
1167  fragment_template_node,
1168  content_component_node,
1169  adaptionset_baseurl_node,
1170  adaptionset_segmentlist_node,
1171  adaptionset_supplementalproperty_node);
1172  if (ret < 0) {
1173  return ret;
1174  }
1175  }
1176  node = xmlNextElementSibling(node);
1177  }
1178  return 0;
1179 }
1180 
1181 static int parse_programinformation(AVFormatContext *s, xmlNodePtr node)
1182 {
1183  xmlChar *val = NULL;
1184 
1185  node = xmlFirstElementChild(node);
1186  while (node) {
1187  if (!av_strcasecmp(node->name, "Title")) {
1188  val = xmlNodeGetContent(node);
1189  if (val) {
1190  av_dict_set(&s->metadata, "Title", val, 0);
1191  }
1192  } else if (!av_strcasecmp(node->name, "Source")) {
1193  val = xmlNodeGetContent(node);
1194  if (val) {
1195  av_dict_set(&s->metadata, "Source", val, 0);
1196  }
1197  } else if (!av_strcasecmp(node->name, "Copyright")) {
1198  val = xmlNodeGetContent(node);
1199  if (val) {
1200  av_dict_set(&s->metadata, "Copyright", val, 0);
1201  }
1202  }
1203  node = xmlNextElementSibling(node);
1204  xmlFree(val);
1205  val = NULL;
1206  }
1207  return 0;
1208 }
1209 
1210 static int parse_manifest(AVFormatContext *s, const char *url, AVIOContext *in)
1211 {
1212  DASHContext *c = s->priv_data;
1213  int ret = 0;
1214  int close_in = 0;
1215  uint8_t *new_url = NULL;
1216  int64_t filesize = 0;
1217  char *buffer = NULL;
1218  AVDictionary *opts = NULL;
1219  xmlDoc *doc = NULL;
1220  xmlNodePtr root_element = NULL;
1221  xmlNodePtr node = NULL;
1222  xmlNodePtr period_node = NULL;
1223  xmlNodePtr tmp_node = NULL;
1224  xmlNodePtr mpd_baseurl_node = NULL;
1225  xmlNodePtr period_baseurl_node = NULL;
1226  xmlNodePtr period_segmenttemplate_node = NULL;
1227  xmlNodePtr period_segmentlist_node = NULL;
1228  xmlNodePtr adaptionset_node = NULL;
1229  xmlAttrPtr attr = NULL;
1230  char *val = NULL;
1231  uint32_t period_duration_sec = 0;
1232  uint32_t period_start_sec = 0;
1233 
1234  if (!in) {
1235  close_in = 1;
1236 
1237  av_dict_copy(&opts, c->avio_opts, 0);
1238  ret = avio_open2(&in, url, AVIO_FLAG_READ, c->interrupt_callback, &opts);
1239  av_dict_free(&opts);
1240  if (ret < 0)
1241  return ret;
1242  }
1243 
1244  if (av_opt_get(in, "location", AV_OPT_SEARCH_CHILDREN, &new_url) >= 0) {
1245  c->base_url = av_strdup(new_url);
1246  } else {
1247  c->base_url = av_strdup(url);
1248  }
1249 
1250  filesize = avio_size(in);
1251  if (filesize <= 0) {
1252  filesize = 8 * 1024;
1253  }
1254 
1255  buffer = av_mallocz(filesize);
1256  if (!buffer) {
1257  av_free(c->base_url);
1258  return AVERROR(ENOMEM);
1259  }
1260 
1261  filesize = avio_read(in, buffer, filesize);
1262  if (filesize <= 0) {
1263  av_log(s, AV_LOG_ERROR, "Unable to read manifest '%s'\n", url);
1264  ret = AVERROR_INVALIDDATA;
1265  } else {
1266  LIBXML_TEST_VERSION
1267 
1268  doc = xmlReadMemory(buffer, filesize, c->base_url, NULL, 0);
1269  root_element = xmlDocGetRootElement(doc);
1270  node = root_element;
1271 
1272  if (!node) {
1273  ret = AVERROR_INVALIDDATA;
1274  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing root node\n", url);
1275  goto cleanup;
1276  }
1277 
1278  if (node->type != XML_ELEMENT_NODE ||
1279  av_strcasecmp(node->name, (const char *)"MPD")) {
1280  ret = AVERROR_INVALIDDATA;
1281  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - wrong root node name[%s] type[%d]\n", url, node->name, (int)node->type);
1282  goto cleanup;
1283  }
1284 
1285  val = xmlGetProp(node, "type");
1286  if (!val) {
1287  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing type attrib\n", url);
1288  ret = AVERROR_INVALIDDATA;
1289  goto cleanup;
1290  }
1291  if (!av_strcasecmp(val, (const char *)"dynamic"))
1292  c->is_live = 1;
1293  xmlFree(val);
1294 
1295  attr = node->properties;
1296  while (attr) {
1297  val = xmlGetProp(node, attr->name);
1298 
1299  if (!av_strcasecmp(attr->name, (const char *)"availabilityStartTime")) {
1300  c->availability_start_time = get_utc_date_time_insec(s, (const char *)val);
1301  av_log(s, AV_LOG_TRACE, "c->availability_start_time = [%"PRId64"]\n", c->availability_start_time);
1302  } else if (!av_strcasecmp(attr->name, (const char *)"availabilityEndTime")) {
1303  c->availability_end_time = get_utc_date_time_insec(s, (const char *)val);
1304  av_log(s, AV_LOG_TRACE, "c->availability_end_time = [%"PRId64"]\n", c->availability_end_time);
1305  } else if (!av_strcasecmp(attr->name, (const char *)"publishTime")) {
1306  c->publish_time = get_utc_date_time_insec(s, (const char *)val);
1307  av_log(s, AV_LOG_TRACE, "c->publish_time = [%"PRId64"]\n", c->publish_time);
1308  } else if (!av_strcasecmp(attr->name, (const char *)"minimumUpdatePeriod")) {
1309  c->minimum_update_period = get_duration_insec(s, (const char *)val);
1310  av_log(s, AV_LOG_TRACE, "c->minimum_update_period = [%"PRId64"]\n", c->minimum_update_period);
1311  } else if (!av_strcasecmp(attr->name, (const char *)"timeShiftBufferDepth")) {
1312  c->time_shift_buffer_depth = get_duration_insec(s, (const char *)val);
1313  av_log(s, AV_LOG_TRACE, "c->time_shift_buffer_depth = [%"PRId64"]\n", c->time_shift_buffer_depth);
1314  } else if (!av_strcasecmp(attr->name, (const char *)"minBufferTime")) {
1315  c->min_buffer_time = get_duration_insec(s, (const char *)val);
1316  av_log(s, AV_LOG_TRACE, "c->min_buffer_time = [%"PRId64"]\n", c->min_buffer_time);
1317  } else if (!av_strcasecmp(attr->name, (const char *)"suggestedPresentationDelay")) {
1318  c->suggested_presentation_delay = get_duration_insec(s, (const char *)val);
1319  av_log(s, AV_LOG_TRACE, "c->suggested_presentation_delay = [%"PRId64"]\n", c->suggested_presentation_delay);
1320  } else if (!av_strcasecmp(attr->name, (const char *)"mediaPresentationDuration")) {
1321  c->media_presentation_duration = get_duration_insec(s, (const char *)val);
1322  av_log(s, AV_LOG_TRACE, "c->media_presentation_duration = [%"PRId64"]\n", c->media_presentation_duration);
1323  }
1324  attr = attr->next;
1325  xmlFree(val);
1326  }
1327 
1328  tmp_node = find_child_node_by_name(node, "BaseURL");
1329  if (tmp_node) {
1330  mpd_baseurl_node = xmlCopyNode(tmp_node,1);
1331  } else {
1332  mpd_baseurl_node = xmlNewNode(NULL, "BaseURL");
1333  }
1334 
1335  // for now we can handle only one Period, the one with the longest duration
1336  node = xmlFirstElementChild(node);
1337  while (node) {
1338  if (!av_strcasecmp(node->name, (const char *)"Period")) {
1339  period_duration_sec = 0;
1340  period_start_sec = 0;
1341  attr = node->properties;
1342  while (attr) {
1343  val = xmlGetProp(node, attr->name);
1344  if (!av_strcasecmp(attr->name, (const char *)"duration")) {
1345  period_duration_sec = get_duration_insec(s, (const char *)val);
1346  } else if (!av_strcasecmp(attr->name, (const char *)"start")) {
1347  period_start_sec = get_duration_insec(s, (const char *)val);
1348  }
1349  attr = attr->next;
1350  xmlFree(val);
1351  }
1352  if ((period_duration_sec) >= (c->period_duration)) {
1353  period_node = node;
1354  c->period_duration = period_duration_sec;
1355  c->period_start = period_start_sec;
1356  if (c->period_start > 0)
1357  c->is_live = 1;
1358  }
1359  } else if (!av_strcasecmp(node->name, "ProgramInformation")) {
1360  parse_programinformation(s, node);
1361  }
1362  node = xmlNextElementSibling(node);
1363  }
1364  if (!period_node) {
1365  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing Period node\n", url);
1366  ret = AVERROR_INVALIDDATA;
1367  goto cleanup;
1368  }
1369 
1370  adaptionset_node = xmlFirstElementChild(period_node);
1371  while (adaptionset_node) {
1372  if (!av_strcasecmp(adaptionset_node->name, (const char *)"BaseURL")) {
1373  period_baseurl_node = adaptionset_node;
1374  } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"SegmentTemplate")) {
1375  period_segmenttemplate_node = adaptionset_node;
1376  } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"SegmentList")) {
1377  period_segmentlist_node = adaptionset_node;
1378  } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"AdaptationSet")) {
1379  parse_manifest_adaptationset(s, url, adaptionset_node, mpd_baseurl_node, period_baseurl_node, period_segmenttemplate_node, period_segmentlist_node);
1380  }
1381  adaptionset_node = xmlNextElementSibling(adaptionset_node);
1382  }
1383 cleanup:
1384  /*free the document */
1385  xmlFreeDoc(doc);
1386  xmlCleanupParser();
1387  xmlFreeNode(mpd_baseurl_node);
1388  }
1389 
1390  av_free(new_url);
1391  av_free(buffer);
1392  if (close_in) {
1393  avio_close(in);
1394  }
1395  return ret;
1396 }
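/*
 * Minimal sketch of the manifest shape parse_manifest() expects; all values
 * here are illustrative only:
 *
 *   <MPD type="dynamic" availabilityStartTime="2017-10-17T09:35:00Z"
 *        minimumUpdatePeriod="PT10S" minBufferTime="PT4S">
 *     <Period start="PT0S">
 *       <AdaptationSet contentType="video">
 *         <Representation id="video1" bandwidth="500000" frameRate="25/1">
 *           <SegmentTemplate timescale="90000" duration="450000"
 *                            initialization="init-$RepresentationID$.m4s"
 *                            media="chunk-$RepresentationID$-$Number$.m4s"/>
 *         </Representation>
 *       </AdaptationSet>
 *     </Period>
 *   </MPD>
 *
 * Only one Period is kept (the one with the longest duration) and
 * type="dynamic" marks the stream as live.
 */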
1397 
1398 static int64_t calc_cur_seg_no(AVFormatContext *s, struct representation *pls)
1399 {
1400  DASHContext *c = s->priv_data;
1401  int64_t num = 0;
1402  int64_t start_time_offset = 0;
1403 
1404  if (c->is_live) {
1405  if (pls->n_fragments) {
1406  av_log(s, AV_LOG_TRACE, "in n_fragments mode\n");
1407  num = pls->first_seq_no;
1408  } else if (pls->n_timelines) {
1409  av_log(s, AV_LOG_TRACE, "in n_timelines mode\n");
1410  start_time_offset = get_segment_start_time_based_on_timeline(pls, 0xFFFFFFFF) - 60 * pls->fragment_timescale; // 60 seconds before end
1411  num = calc_next_seg_no_from_timelines(pls, start_time_offset);
1412  if (num == -1)
1413  num = pls->first_seq_no;
1414  else
1415  num += pls->first_seq_no;
1416  } else if (pls->fragment_duration){
1417  av_log(s, AV_LOG_TRACE, "in fragment_duration mode fragment_timescale = %"PRId64", presentation_timeoffset = %"PRId64"\n", pls->fragment_timescale, pls->presentation_timeoffset);
1418  if (pls->presentation_timeoffset) {
1419  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) * pls->fragment_timescale) - pls->presentation_timeoffset) / pls->fragment_duration;
1420  } else if (c->publish_time > 0 && !c->availability_start_time) {
1421  if (c->min_buffer_time) {
1422  num = pls->first_seq_no + (((c->publish_time + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration - 1;
1423  } else {
1424  num = pls->first_seq_no + (((c->publish_time + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1425  }
1426  } else {
1427  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1428  }
1429  }
1430  } else {
1431  num = pls->first_seq_no;
1432  }
1433  return num;
1434 }
1435 
1436 static int64_t calc_min_seg_no(AVFormatContext *s, struct representation *pls)
1437 {
1438  DASHContext *c = s->priv_data;
1439  int64_t num = 0;
1440 
1441  if (c->is_live && pls->fragment_duration) {
1442  av_log(s, AV_LOG_TRACE, "in live mode\n");
1443  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->time_shift_buffer_depth) * pls->fragment_timescale) / pls->fragment_duration;
1444  } else {
1445  num = pls->first_seq_no;
1446  }
1447  return num;
1448 }
1449 
1450 static int64_t calc_max_seg_no(struct representation *pls, DASHContext *c)
1451 {
1452  int64_t num = 0;
1453 
1454  if (pls->n_fragments) {
1455  num = pls->first_seq_no + pls->n_fragments - 1;
1456  } else if (pls->n_timelines) {
1457  int i = 0;
1458  num = pls->first_seq_no + pls->n_timelines - 1;
1459  for (i = 0; i < pls->n_timelines; i++) {
1460  if (pls->timelines[i]->repeat == -1) {
1461  int length_of_each_segment = pls->timelines[i]->duration / pls->fragment_timescale;
1462  num = c->period_duration / length_of_each_segment;
1463  } else {
1464  num += pls->timelines[i]->repeat;
1465  }
1466  }
1467  } else if (c->is_live && pls->fragment_duration) {
1468  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time)) * pls->fragment_timescale) / pls->fragment_duration;
1469  } else if (pls->fragment_duration) {
1470  num = pls->first_seq_no + (c->media_presentation_duration * pls->fragment_timescale) / pls->fragment_duration;
1471  }
1472 
1473  return num;
1474 }
1475 
1476 static void move_timelines(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1477 {
1478  if (rep_dest && rep_src ) {
1479  free_timelines_list(rep_dest);
1480  rep_dest->timelines = rep_src->timelines;
1481  rep_dest->n_timelines = rep_src->n_timelines;
1482  rep_dest->first_seq_no = rep_src->first_seq_no;
1483  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1484  rep_src->timelines = NULL;
1485  rep_src->n_timelines = 0;
1486  rep_dest->cur_seq_no = rep_src->cur_seq_no;
1487  }
1488 }
1489 
1490 static void move_segments(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1491 {
1492  if (rep_dest && rep_src ) {
1493  free_fragment_list(rep_dest);
1494  if (rep_src->start_number > (rep_dest->start_number + rep_dest->n_fragments))
1495  rep_dest->cur_seq_no = 0;
1496  else
1497  rep_dest->cur_seq_no += rep_src->start_number - rep_dest->start_number;
1498  rep_dest->fragments = rep_src->fragments;
1499  rep_dest->n_fragments = rep_src->n_fragments;
1500  rep_dest->parent = rep_src->parent;
1501  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1502  rep_src->fragments = NULL;
1503  rep_src->n_fragments = 0;
1504  }
1505 }
1506 
1507 
1508 static int refresh_manifest(AVFormatContext *s)
1509 {
1510  int ret = 0, i;
1511  DASHContext *c = s->priv_data;
1512  // save current context
1513  int n_videos = c->n_videos;
1514  struct representation **videos = c->videos;
1515  int n_audios = c->n_audios;
1516  struct representation **audios = c->audios;
1517  int n_subtitles = c->n_subtitles;
1518  struct representation **subtitles = c->subtitles;
1519  char *base_url = c->base_url;
1520 
1521  c->base_url = NULL;
1522  c->n_videos = 0;
1523  c->videos = NULL;
1524  c->n_audios = 0;
1525  c->audios = NULL;
1526  c->n_subtitles = 0;
1527  c->subtitles = NULL;
1528  ret = parse_manifest(s, s->url, NULL);
1529  if (ret)
1530  goto finish;
1531 
1532  if (c->n_videos != n_videos) {
1533  av_log(c, AV_LOG_ERROR,
1534  "new manifest has mismatched no. of video representations, %d -> %d\n",
1535  n_videos, c->n_videos);
1536  return AVERROR_INVALIDDATA;
1537  }
1538  if (c->n_audios != n_audios) {
1539  av_log(c, AV_LOG_ERROR,
1540  "new manifest has mismatched no. of audio representations, %d -> %d\n",
1541  n_audios, c->n_audios);
1542  return AVERROR_INVALIDDATA;
1543  }
1544  if (c->n_subtitles != n_subtitles) {
1545  av_log(c, AV_LOG_ERROR,
1546  "new manifest has mismatched no. of subtitle representations, %d -> %d\n",
1547  n_subtitles, c->n_subtitles);
1548  return AVERROR_INVALIDDATA;
1549  }
1550 
1551  for (i = 0; i < n_videos; i++) {
1552  struct representation *cur_video = videos[i];
1553  struct representation *ccur_video = c->videos[i];
1554  if (cur_video->timelines) {
1555  // calc current time
1556  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_video, cur_video->cur_seq_no) / cur_video->fragment_timescale;
1557  // update segments
1558  ccur_video->cur_seq_no = calc_next_seg_no_from_timelines(ccur_video, currentTime * cur_video->fragment_timescale - 1);
1559  if (ccur_video->cur_seq_no >= 0) {
1560  move_timelines(ccur_video, cur_video, c);
1561  }
1562  }
1563  if (cur_video->fragments) {
1564  move_segments(ccur_video, cur_video, c);
1565  }
1566  }
1567  for (i = 0; i < n_audios; i++) {
1568  struct representation *cur_audio = audios[i];
1569  struct representation *ccur_audio = c->audios[i];
1570  if (cur_audio->timelines) {
1571  // calc current time
1572  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_audio, cur_audio->cur_seq_no) / cur_audio->fragment_timescale;
1573  // update segments
1574  ccur_audio->cur_seq_no = calc_next_seg_no_from_timelines(ccur_audio, currentTime * cur_audio->fragment_timescale - 1);
1575  if (ccur_audio->cur_seq_no >= 0) {
1576  move_timelines(ccur_audio, cur_audio, c);
1577  }
1578  }
1579  if (cur_audio->fragments) {
1580  move_segments(ccur_audio, cur_audio, c);
1581  }
1582  }
1583 
1584 finish:
1585  // restore context
1586  if (c->base_url)
1587  av_free(base_url);
1588  else
1589  c->base_url = base_url;
1590 
1591  if (c->subtitles)
1592  free_subtitle_list(c);
1593  if (c->audios)
1594  free_audio_list(c);
1595  if (c->videos)
1596  free_video_list(c);
1597 
1598  c->n_subtitles = n_subtitles;
1599  c->subtitles = subtitles;
1600  c->n_audios = n_audios;
1601  c->audios = audios;
1602  c->n_videos = n_videos;
1603  c->videos = videos;
1604  return ret;
1605 }
1606 
1607 static struct fragment *get_current_fragment(struct representation *pls)
1608 {
1609  int64_t min_seq_no = 0;
1610  int64_t max_seq_no = 0;
1611  struct fragment *seg = NULL;
1612  struct fragment *seg_ptr = NULL;
1613  DASHContext *c = pls->parent->priv_data;
1614 
1615  while (!ff_check_interrupt(c->interrupt_callback) && pls->n_fragments > 0) {
1616  if (pls->cur_seq_no < pls->n_fragments) {
1617  seg_ptr = pls->fragments[pls->cur_seq_no];
1618  seg = av_mallocz(sizeof(struct fragment));
1619  if (!seg) {
1620  return NULL;
1621  }
1622  seg->url = av_strdup(seg_ptr->url);
1623  if (!seg->url) {
1624  av_free(seg);
1625  return NULL;
1626  }
1627  seg->size = seg_ptr->size;
1628  seg->url_offset = seg_ptr->url_offset;
1629  return seg;
1630  } else if (c->is_live) {
1631  refresh_manifest(pls->parent);
1632  } else {
1633  break;
1634  }
1635  }
1636  if (c->is_live) {
1637  min_seq_no = calc_min_seg_no(pls->parent, pls);
1638  max_seq_no = calc_max_seg_no(pls, c);
1639 
1640  if (pls->timelines || pls->fragments) {
1641  refresh_manifest(pls->parent);
1642  }
1643  if (pls->cur_seq_no <= min_seq_no) {
1644  av_log(pls->parent, AV_LOG_VERBOSE, "old fragment: cur[%"PRId64"] min[%"PRId64"] max[%"PRId64"], playlist %d\n", (int64_t)pls->cur_seq_no, min_seq_no, max_seq_no, (int)pls->rep_idx);
1645  pls->cur_seq_no = calc_cur_seg_no(pls->parent, pls);
1646  } else if (pls->cur_seq_no > max_seq_no) {
1647  av_log(pls->parent, AV_LOG_VERBOSE, "new fragment: min[%"PRId64"] max[%"PRId64"], playlist %d\n", min_seq_no, max_seq_no, (int)pls->rep_idx);
1648  }
1649  seg = av_mallocz(sizeof(struct fragment));
1650  if (!seg) {
1651  return NULL;
1652  }
1653  } else if (pls->cur_seq_no <= pls->last_seq_no) {
1654  seg = av_mallocz(sizeof(struct fragment));
1655  if (!seg) {
1656  return NULL;
1657  }
1658  }
1659  if (seg) {
1660  char *tmpfilename= av_mallocz(c->max_url_size);
1661  if (!tmpfilename) {
1662  return NULL;
1663  }
1664  ff_dash_fill_tmpl_params(tmpfilename, c->max_url_size, pls->url_template, 0, pls->cur_seq_no, 0, get_segment_start_time_based_on_timeline(pls, pls->cur_seq_no));
1665  seg->url = av_strireplace(pls->url_template, pls->url_template, tmpfilename);
1666  if (!seg->url) {
1667  av_log(pls->parent, AV_LOG_WARNING, "Unable to resolve template url '%s', falling back to the original template\n", pls->url_template);
1668  seg->url = av_strdup(pls->url_template);
1669  if (!seg->url) {
1670  av_log(pls->parent, AV_LOG_ERROR, "Cannot resolve template url '%s'\n", pls->url_template);
1671  av_free(tmpfilename);
1672  return NULL;
1673  }
1674  }
1675  av_free(tmpfilename);
1676  seg->size = -1;
1677  }
1678 
1679  return seg;
1680 }
1681 
1682 static int read_from_url(struct representation *pls, struct fragment *seg,
1683  uint8_t *buf, int buf_size)
1684 {
1685  int ret;
1686 
1687  /* limit read if the fragment was only a part of a file */
1688  if (seg->size >= 0)
1689  buf_size = FFMIN(buf_size, pls->cur_seg_size - pls->cur_seg_offset);
1690 
1691  ret = avio_read(pls->input, buf, buf_size);
1692  if (ret > 0)
1693  pls->cur_seg_offset += ret;
1694 
1695  return ret;
1696 }
1697 
1698 static int open_input(DASHContext *c, struct representation *pls, struct fragment *seg)
1699 {
1700  AVDictionary *opts = NULL;
1701  char *url = NULL;
1702  int ret = 0;
1703 
1704  url = av_mallocz(c->max_url_size);
1705  if (!url) {
1706  ret = AVERROR(ENOMEM);
1707  goto cleanup;
1708  }
1709 
1710  if (seg->size >= 0) {
1711  /* try to restrict the HTTP request to the part we want
1712  * (if this is in fact a HTTP request) */
1713  av_dict_set_int(&opts, "offset", seg->url_offset, 0);
1714  av_dict_set_int(&opts, "end_offset", seg->url_offset + seg->size, 0);
1715  }
1716 
1717  ff_make_absolute_url(url, c->max_url_size, c->base_url, seg->url);
1718  av_log(pls->parent, AV_LOG_VERBOSE, "DASH request for url '%s', offset %"PRId64", playlist %d\n",
1719  url, seg->url_offset, pls->rep_idx);
1720  ret = open_url(pls->parent, &pls->input, url, c->avio_opts, opts, NULL);
1721 
1722 cleanup:
1723  av_free(url);
1724  av_dict_free(&opts);
1725  pls->cur_seg_offset = 0;
1726  pls->cur_seg_size = seg->size;
1727  return ret;
1728 }
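/*
 * For a fragment with url_offset = 1000 and size = 500, the code above sets
 * the protocol options "offset" to 1000 and "end_offset" to 1500 so that only
 * that byte range is requested (when the protocol is HTTP); fragments with
 * size -1 are opened without any range restriction.
 */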
1729 
1730 static int update_init_section(struct representation *pls)
1731 {
1732  static const int max_init_section_size = 1024 * 1024;
1733  DASHContext *c = pls->parent->priv_data;
1734  int64_t sec_size;
1735  int64_t urlsize;
1736  int ret;
1737 
1738  if (!pls->init_section || pls->init_sec_buf)
1739  return 0;
1740 
1741  ret = open_input(c, pls, pls->init_section);
1742  if (ret < 0) {
1743  av_log(pls->parent, AV_LOG_WARNING,
1744  "Failed to open an initialization section in playlist %d\n",
1745  pls->rep_idx);
1746  return ret;
1747  }
1748 
1749  if (pls->init_section->size >= 0)
1750  sec_size = pls->init_section->size;
1751  else if ((urlsize = avio_size(pls->input)) >= 0)
1752  sec_size = urlsize;
1753  else
1754  sec_size = max_init_section_size;
1755 
1756  av_log(pls->parent, AV_LOG_DEBUG,
1757  "Downloading an initialization section of size %"PRId64"\n",
1758  sec_size);
1759 
1760  sec_size = FFMIN(sec_size, max_init_section_size);
1761 
1762  av_fast_malloc(&pls->init_sec_buf, &pls->init_sec_buf_size, sec_size);
1763 
1764  ret = read_from_url(pls, pls->init_section, pls->init_sec_buf,
1765  pls->init_sec_buf_size);
1766  ff_format_io_close(pls->parent, &pls->input);
1767 
1768  if (ret < 0)
1769  return ret;
1770 
1771  pls->init_sec_data_len = ret;
1772  pls->init_sec_buf_read_offset = 0;
1773 
1774  return 0;
1775 }
1776 
1777 static int64_t seek_data(void *opaque, int64_t offset, int whence)
1778 {
1779  struct representation *v = opaque;
1780  if (v->n_fragments && !v->init_sec_data_len) {
1781  return avio_seek(v->input, offset, whence);
1782  }
1783 
1784  return AVERROR(ENOSYS);
1785 }
1786 
1787 static int read_data(void *opaque, uint8_t *buf, int buf_size)
1788 {
1789  int ret = 0;
1790  struct representation *v = opaque;
1791  DASHContext *c = v->parent->priv_data;
1792 
1793 restart:
1794  if (!v->input) {
1795  free_fragment(&v->cur_seg);
1796  v->cur_seg = get_current_fragment(v);
1797  if (!v->cur_seg) {
1798  ret = AVERROR_EOF;
1799  goto end;
1800  }
1801 
1802  /* load/update Media Initialization Section, if any */
1803  ret = update_init_section(v);
1804  if (ret)
1805  goto end;
1806 
1807  ret = open_input(c, v, v->cur_seg);
1808  if (ret < 0) {
1809  if (ff_check_interrupt(c->interrupt_callback)) {
1810  ret = AVERROR_EXIT;
1811  goto end;
1812  }
1813  av_log(v->parent, AV_LOG_WARNING, "Failed to open fragment of playlist %d\n", v->rep_idx);
1814  v->cur_seq_no++;
1815  goto restart;
1816  }
1817  }
1818 
1819  if (v->init_sec_buf_read_offset < v->init_sec_data_len) {
1820  /* Push init section out first before first actual fragment */
1821  int copy_size = FFMIN(v->init_sec_data_len - v->init_sec_buf_read_offset, buf_size);
1822  memcpy(buf, v->init_sec_buf, copy_size);
1823  v->init_sec_buf_read_offset += copy_size;
1824  ret = copy_size;
1825  goto end;
1826  }
1827 
1828  /* check v->cur_seg; if it is NULL, fetch the current fragment and check again */
1829  if (!v->cur_seg) {
1830  v->cur_seg = get_current_fragment(v);
1831  }
1832  if (!v->cur_seg) {
1833  ret = AVERROR_EOF;
1834  goto end;
1835  }
1836  ret = read_from_url(v, v->cur_seg, buf, buf_size);
1837  if (ret > 0)
1838  goto end;
1839 
1840  if (c->is_live || v->cur_seq_no < v->last_seq_no) {
1841  if (!v->is_restart_needed)
1842  v->cur_seq_no++;
1843  v->is_restart_needed = 1;
1844  }
1845 
1846 end:
1847  return ret;
1848 }
1849 
1850 static int save_avio_options(AVFormatContext *s)
1851 {
1852  DASHContext *c = s->priv_data;
1853  const char *opts[] = {
1854  "headers", "user_agent", "cookies", "http_proxy", "referer", "rw_timeout", NULL };
1855  const char **opt = opts;
1856  uint8_t *buf = NULL;
1857  int ret = 0;
1858 
1859  while (*opt) {
1860  if (av_opt_get(s->pb, *opt, AV_OPT_SEARCH_CHILDREN, &buf) >= 0) {
1861  if (buf[0] != '\0') {
1862  ret = av_dict_set(&c->avio_opts, *opt, buf, AV_DICT_DONT_STRDUP_VAL);
1863  if (ret < 0) {
1864  av_freep(&buf);
1865  return ret;
1866  }
1867  } else {
1868  av_freep(&buf);
1869  }
1870  }
1871  opt++;
1872  }
1873 
1874  return ret;
1875 }
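
save_avio_options() relies on av_opt_get() with AV_OPT_SEARCH_CHILDREN to find options that were set on the underlying protocol of the outer context (for example a user agent). A hedged sketch of copying one such option into a dictionary, mirroring the ownership transfer above; "user_agent" is just the example key:

#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/dict.h>
#include <libavutil/mem.h>

/* Illustration only: copy one option from an opened context's AVIOContext
 * into a dictionary, handing ownership of the returned string to the dict. */
static int copy_one_opt(AVFormatContext *s, AVDictionary **dst)
{
    uint8_t *val = NULL;

    if (av_opt_get(s->pb, "user_agent", AV_OPT_SEARCH_CHILDREN, &val) >= 0 && val) {
        if (val[0])
            return av_dict_set(dst, "user_agent", val, AV_DICT_DONT_STRDUP_VAL);
        av_freep(&val);
    }
    return 0;
}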
1876 
1877 static int nested_io_open(AVFormatContext *s, AVIOContext **pb, const char *url,
1878  int flags, AVDictionary **opts)
1879 {
1880  av_log(s, AV_LOG_ERROR,
1881  "A DASH playlist item '%s' referred to an external file '%s'. "
1882  "Opening this file was forbidden for security reasons\n",
1883  s->url, url);
1884  return AVERROR(EPERM);
1885 }
1886 
1887 static void close_demux_for_component(struct representation *pls)
1888 {
1889  /* note: the internal buffer could have changed */
1890  av_freep(&pls->pb.buffer);
1891  memset(&pls->pb, 0x00, sizeof(AVIOContext));
1892  pls->ctx->pb = NULL;
1893  avformat_close_input(&pls->ctx);
1894  pls->ctx = NULL;
1895 }
1896 
1897 static int reopen_demux_for_component(AVFormatContext *s, struct representation *pls)
1898 {
1899  DASHContext *c = s->priv_data;
1900  ff_const59 AVInputFormat *in_fmt = NULL;
1901  AVDictionary *in_fmt_opts = NULL;
1902  uint8_t *avio_ctx_buffer = NULL;
1903  int ret = 0, i;
1904 
1905  if (pls->ctx) {
1906  close_demux_for_component(pls);
1907  }
1908 
1909  if (ff_check_interrupt(&s->interrupt_callback)) {
1910  ret = AVERROR_EXIT;
1911  goto fail;
1912  }
1913 
1914  if (!(pls->ctx = avformat_alloc_context())) {
1915  ret = AVERROR(ENOMEM);
1916  goto fail;
1917  }
1918 
1919  avio_ctx_buffer = av_malloc(INITIAL_BUFFER_SIZE);
1920  if (!avio_ctx_buffer ) {
1921  ret = AVERROR(ENOMEM);
1922  avformat_free_context(pls->ctx);
1923  pls->ctx = NULL;
1924  goto fail;
1925  }
1926  if (c->is_live) {
1927  ffio_init_context(&pls->pb, avio_ctx_buffer , INITIAL_BUFFER_SIZE, 0, pls, read_data, NULL, NULL);
1928  } else {
1929  ffio_init_context(&pls->pb, avio_ctx_buffer , INITIAL_BUFFER_SIZE, 0, pls, read_data, NULL, seek_data);
1930  }
1931  pls->pb.seekable = 0;
1932 
1933  if ((ret = ff_copy_whiteblacklists(pls->ctx, s)) < 0)
1934  goto fail;
1935 
1936  pls->ctx->flags = AVFMT_FLAG_CUSTOM_IO;
1937  pls->ctx->probesize = s->probesize > 0 ? s->probesize : 1024 * 4;
1938  pls->ctx->max_analyze_duration = s->max_analyze_duration > 0 ? s->max_analyze_duration : 4 * AV_TIME_BASE;
1939  ret = av_probe_input_buffer(&pls->pb, &in_fmt, "", NULL, 0, 0);
1940  if (ret < 0) {
1941  av_log(s, AV_LOG_ERROR, "Error when loading first fragment, playlist %d\n", (int)pls->rep_idx);
1942  avformat_free_context(pls->ctx);
1943  pls->ctx = NULL;
1944  goto fail;
1945  }
1946 
1947  pls->ctx->pb = &pls->pb;
1948  pls->ctx->io_open = nested_io_open;
1949 
1950  // provide additional information from mpd if available
1951  ret = avformat_open_input(&pls->ctx, "", in_fmt, &in_fmt_opts); //pls->init_section->url
1952  av_dict_free(&in_fmt_opts);
1953  if (ret < 0)
1954  goto fail;
1955  if (pls->n_fragments) {
1956 #if FF_API_R_FRAME_RATE
1957  if (pls->framerate.den) {
1958  for (i = 0; i < pls->ctx->nb_streams; i++)
1959  pls->ctx->streams[i]->r_frame_rate = pls->framerate;
1960  }
1961 #endif
1962  ret = avformat_find_stream_info(pls->ctx, NULL);
1963  if (ret < 0)
1964  goto fail;
1965  }
1966 
1967 fail:
1968  return ret;
1969 }
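
Outside of libavformat the same custom-IO wiring is done with the public avio_alloc_context() instead of the internal ffio_init_context(). A minimal sketch under that assumption; the read callback, opaque pointer, helper name and 32768-byte buffer size are placeholders chosen to mirror the code above:

#include <libavformat/avformat.h>
#include <libavutil/mem.h>

/* Placeholder read callback: a real one would fill buf the way read_data()
 * does. Here it only signals end of stream. */
static int my_read(void *opaque, uint8_t *buf, int buf_size)
{
    (void)opaque; (void)buf; (void)buf_size;
    return AVERROR_EOF;
}

static AVFormatContext *open_with_custom_io(void *opaque)
{
    AVFormatContext *ctx = avformat_alloc_context();
    AVIOContext *pb = NULL;
    unsigned char *buf = av_malloc(32768);

    if (!ctx || !buf)
        goto fail;
    pb = avio_alloc_context(buf, 32768, 0, opaque, my_read, NULL, NULL);
    if (!pb)
        goto fail;
    buf = NULL;                          /* now owned by pb */
    ctx->pb = pb;
    ctx->flags |= AVFMT_FLAG_CUSTOM_IO;  /* caller keeps ownership of pb */
    if (avformat_open_input(&ctx, "", NULL, NULL) < 0) {
        ctx = NULL;                      /* freed by avformat_open_input() on error */
        goto fail;
    }
    return ctx;

fail:
    if (pb)
        av_freep(&pb->buffer);
    avio_context_free(&pb);
    av_free(buf);
    avformat_free_context(ctx);
    return NULL;
}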
1970 
1972 {
1973  int ret = 0;
1974  int i;
1975 
1976  pls->parent = s;
1977  pls->cur_seq_no = calc_cur_seg_no(s, pls);
1978 
1979  if (!pls->last_seq_no) {
1980  pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
1981  }
1982 
1983  ret = reopen_demux_for_component(s, pls);
1984  if (ret < 0) {
1985  goto fail;
1986  }
1987  for (i = 0; i < pls->ctx->nb_streams; i++) {
1988  AVStream *st = avformat_new_stream(s, NULL);
1989  AVStream *ist = pls->ctx->streams[i];
1990  if (!st) {
1991  ret = AVERROR(ENOMEM);
1992  goto fail;
1993  }
1994  st->id = i;
1995  avcodec_parameters_copy(st->codecpar, ist->codecpar);
1996  avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
1997  }
1998 
1999  return 0;
2000 fail:
2001  return ret;
2002 }
2003 
2004 static int is_common_init_section_exist(struct representation **pls, int n_pls)
2005 {
2006  struct fragment *first_init_section = pls[0]->init_section;
2007  char *url =NULL;
2008  int64_t url_offset = -1;
2009  int64_t size = -1;
2010  int i = 0;
2011 
2012  if (first_init_section == NULL || n_pls == 0)
2013  return 0;
2014 
2015  url = first_init_section->url;
2016  url_offset = first_init_section->url_offset;
2017  size = pls[0]->init_section->size;
2018  for (i=0;i<n_pls;i++) {
2019  if (av_strcasecmp(pls[i]->init_section->url,url) || pls[i]->init_section->url_offset != url_offset || pls[i]->init_section->size != size) {
2020  return 0;
2021  }
2022  }
2023  return 1;
2024 }
2025 
2026 static int copy_init_section(struct representation *rep_dest, struct representation *rep_src)
2027 {
2028  rep_dest->init_sec_buf = av_mallocz(rep_src->init_sec_buf_size);
2029  if (!rep_dest->init_sec_buf) {
2030  av_log(rep_dest->ctx, AV_LOG_WARNING, "Cannot alloc memory for init_sec_buf\n");
2031  return AVERROR(ENOMEM);
2032  }
2033  memcpy(rep_dest->init_sec_buf, rep_src->init_sec_buf, rep_src->init_sec_data_len);
2034  rep_dest->init_sec_buf_size = rep_src->init_sec_buf_size;
2035  rep_dest->init_sec_data_len = rep_src->init_sec_data_len;
2036  rep_dest->cur_timestamp = rep_src->cur_timestamp;
2037 
2038  return 0;
2039 }
2040 
2041 
2042 static int dash_read_header(AVFormatContext *s)
2043 {
2044  DASHContext *c = s->priv_data;
2045  struct representation *rep;
2046  int ret = 0;
2047  int stream_index = 0;
2048  int i;
2049 
2050  c->interrupt_callback = &s->interrupt_callback;
2051 
2052  if ((ret = save_avio_options(s)) < 0)
2053  goto fail;
2054 
2055  if ((ret = parse_manifest(s, s->url, s->pb)) < 0)
2056  goto fail;
2057 
2058  /* If this isn't a live stream, fill the total duration of the
2059  * stream. */
2060  if (!c->is_live) {
2061  s->duration = (int64_t) c->media_presentation_duration * AV_TIME_BASE;
2062  } else {
2063  av_dict_set(&c->avio_opts, "seekable", "0", 0);
2064  }
2065 
2066  if (c->n_videos)
2067  c->is_init_section_common_video = is_common_init_section_exist(c->videos, c->n_videos);
2068 
2069  /* Open the demuxer for video and audio components if available */
2070  for (i = 0; i < c->n_videos; i++) {
2071  rep = c->videos[i];
2072  if (i > 0 && c->is_init_section_common_video) {
2073  ret = copy_init_section(rep, c->videos[0]);
2074  if (ret < 0)
2075  goto fail;
2076  }
2077  ret = open_demux_for_component(s, rep);
2078 
2079  if (ret)
2080  goto fail;
2081  rep->stream_index = stream_index;
2082  ++stream_index;
2083  }
2084 
2085  if (c->n_audios)
2086  c->is_init_section_common_audio = is_common_init_section_exist(c->audios, c->n_audios);
2087 
2088  for (i = 0; i < c->n_audios; i++) {
2089  rep = c->audios[i];
2090  if (i > 0 && c->is_init_section_common_audio) {
2091  ret = copy_init_section(rep, c->audios[0]);
2092  if (ret < 0)
2093  goto fail;
2094  }
2095  ret = open_demux_for_component(s, rep);
2096 
2097  if (ret)
2098  goto fail;
2099  rep->stream_index = stream_index;
2100  ++stream_index;
2101  }
2102 
2103  if (c->n_subtitles)
2104  c->is_init_section_common_audio = is_common_init_section_exist(c->subtitles, c->n_subtitles);
2105 
2106  for (i = 0; i < c->n_subtitles; i++) {
2107  rep = c->subtitles[i];
2108  if (i > 0 && c->is_init_section_common_audio) {
2109  ret = copy_init_section(rep, c->subtitles[0]);
2110  if (ret < 0)
2111  goto fail;
2112  }
2113  ret = open_demux_for_component(s, rep);
2114 
2115  if (ret)
2116  goto fail;
2117  rep->stream_index = stream_index;
2118  ++stream_index;
2119  }
2120 
2121  if (!stream_index) {
2122  ret = AVERROR_INVALIDDATA;
2123  goto fail;
2124  }
2125 
2126  /* Create a program */
2127  if (!ret) {
2128  AVProgram *program;
2129  program = av_new_program(s, 0);
2130  if (!program) {
2131  goto fail;
2132  }
2133 
2134  for (i = 0; i < c->n_videos; i++) {
2135  rep = c->videos[i];
2136  av_program_add_stream_index(s, 0, rep->stream_index);
2137  rep->assoc_stream = s->streams[rep->stream_index];
2138  if (rep->bandwidth > 0)
2139  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2140  if (rep->id[0])
2141  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2142  }
2143  for (i = 0; i < c->n_audios; i++) {
2144  rep = c->audios[i];
2145  av_program_add_stream_index(s, 0, rep->stream_index);
2146  rep->assoc_stream = s->streams[rep->stream_index];
2147  if (rep->bandwidth > 0)
2148  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2149  if (rep->id[0])
2150  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2151  }
2152  for (i = 0; i < c->n_subtitles; i++) {
2153  rep = c->subtitles[i];
2154  av_program_add_stream_index(s, 0, rep->stream_index);
2155  rep->assoc_stream = s->streams[rep->stream_index];
2156  if (rep->id[0])
2157  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2158  }
2159  }
2160 
2161  return 0;
2162 fail:
2163  return ret;
2164 }
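
From an application's point of view everything above runs behind a normal avformat_open_input() call on an .mpd URL. A hedged usage sketch; the URL is a placeholder:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

int main(void)
{
    AVFormatContext *ic = NULL;
    AVPacket *pkt = av_packet_alloc();

    if (!pkt)
        return 1;
    /* Probing and dash_read_header() happen inside this call. */
    if (avformat_open_input(&ic, "https://example.com/manifest.mpd", NULL, NULL) < 0)
        return 1;
    if (avformat_find_stream_info(ic, NULL) >= 0) {
        while (av_read_frame(ic, pkt) >= 0)  /* served by dash_read_packet() */
            av_packet_unref(pkt);
    }
    avformat_close_input(&ic);
    av_packet_free(&pkt);
    return 0;
}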
2165 
2166 static void recheck_discard_flags(AVFormatContext *s, struct representation **p, int n)
2167 {
2168  int i, j;
2169 
2170  for (i = 0; i < n; i++) {
2171  struct representation *pls = p[i];
2172  int needed = !pls->assoc_stream || pls->assoc_stream->discard < AVDISCARD_ALL;
2173 
2174  if (needed && !pls->ctx) {
2175  pls->cur_seg_offset = 0;
2176  pls->init_sec_buf_read_offset = 0;
2177  /* Catch up */
2178  for (j = 0; j < n; j++) {
2179  pls->cur_seq_no = FFMAX(pls->cur_seq_no, p[j]->cur_seq_no);
2180  }
2181  reopen_demux_for_component(s, pls);
2182  av_log(s, AV_LOG_INFO, "Now receiving stream_index %d\n", pls->stream_index);
2183  } else if (!needed && pls->ctx) {
2184  close_demux_for_component(pls);
2185  ff_format_io_close(pls->parent, &pls->input);
2186  av_log(s, AV_LOG_INFO, "No longer receiving stream_index %d\n", pls->stream_index);
2187  }
2188  }
2189 }
2190 
2191 static int dash_read_packet(AVFormatContext *s, AVPacket *pkt)
2192 {
2193  DASHContext *c = s->priv_data;
2194  int ret = 0, i;
2195  int64_t mints = 0;
2196  struct representation *cur = NULL;
2197  struct representation *rep = NULL;
2198 
2199  recheck_discard_flags(s, c->videos, c->n_videos);
2200  recheck_discard_flags(s, c->audios, c->n_audios);
2201  recheck_discard_flags(s, c->subtitles, c->n_subtitles);
2202 
2203  for (i = 0; i < c->n_videos; i++) {
2204  rep = c->videos[i];
2205  if (!rep->ctx)
2206  continue;
2207  if (!cur || rep->cur_timestamp < mints) {
2208  cur = rep;
2209  mints = rep->cur_timestamp;
2210  }
2211  }
2212  for (i = 0; i < c->n_audios; i++) {
2213  rep = c->audios[i];
2214  if (!rep->ctx)
2215  continue;
2216  if (!cur || rep->cur_timestamp < mints) {
2217  cur = rep;
2218  mints = rep->cur_timestamp;
2219  }
2220  }
2221 
2222  for (i = 0; i < c->n_subtitles; i++) {
2223  rep = c->subtitles[i];
2224  if (!rep->ctx)
2225  continue;
2226  if (!cur || rep->cur_timestamp < mints) {
2227  cur = rep;
2228  mints = rep->cur_timestamp;
2229  }
2230  }
2231 
2232  if (!cur) {
2233  return AVERROR_INVALIDDATA;
2234  }
2235  while (!ff_check_interrupt(c->interrupt_callback) && !ret) {
2236  ret = av_read_frame(cur->ctx, pkt);
2237  if (ret >= 0) {
2238  /* If we got a packet, return it */
2239  cur->cur_timestamp = av_rescale(pkt->pts, (int64_t)cur->ctx->streams[0]->time_base.num * 90000, cur->ctx->streams[0]->time_base.den);
2240  pkt->stream_index = cur->stream_index;
2241  return 0;
2242  }
2243  if (cur->is_restart_needed) {
2244  cur->cur_seg_offset = 0;
2245  cur->init_sec_buf_read_offset = 0;
2246  ff_format_io_close(cur->parent, &cur->input);
2247  ret = reopen_demux_for_component(s, cur);
2248  cur->is_restart_needed = 0;
2249  }
2250  }
2251  return AVERROR_EOF;
2252 }
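
dash_read_packet() interleaves representations by always reading from the one whose last timestamp, rescaled to a common 90 kHz clock, is smallest. A sketch of that rescaling step with example numbers; the helper name is made up:

#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

/* Rescale a PTS from the stream time base to 90 kHz, as done after
 * av_read_frame() above. Illustration only. */
static int64_t to_90khz(int64_t pts, AVRational tb)
{
    return av_rescale(pts, (int64_t)tb.num * 90000, tb.den);
}

/* e.g. pts = 48000 in a 1/48000 audio time base -> 90000 (one second),
 * and pts = 25 in a 1/25 video time base -> 90000 as well, so both
 * representations are compared on the same clock. */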
2253 
2254 static int dash_close(AVFormatContext *s)
2255 {
2256  DASHContext *c = s->priv_data;
2257  free_audio_list(c);
2258  free_video_list(c);
2259  av_dict_free(&c->avio_opts);
2260  av_freep(&c->base_url);
2261  return 0;
2262 }
2263 
2264 static int dash_seek(AVFormatContext *s, struct representation *pls, int64_t seek_pos_msec, int flags, int dry_run)
2265 {
2266  int ret = 0;
2267  int i = 0;
2268  int j = 0;
2269  int64_t duration = 0;
2270 
2271  av_log(pls->parent, AV_LOG_VERBOSE, "DASH seek pos[%"PRId64"ms], playlist %d%s\n",
2272  seek_pos_msec, pls->rep_idx, dry_run ? " (dry)" : "");
2273 
2274  // single fragment mode
2275  if (pls->n_fragments == 1) {
2276  pls->cur_timestamp = 0;
2277  pls->cur_seg_offset = 0;
2278  if (dry_run)
2279  return 0;
2280  ff_read_frame_flush(pls->ctx);
2281  return av_seek_frame(pls->ctx, -1, seek_pos_msec * 1000, flags);
2282  }
2283 
2284  ff_format_io_close(pls->parent, &pls->input);
2285 
2286  // find the nearest fragment
2287  if (pls->n_timelines > 0 && pls->fragment_timescale > 0) {
2288  int64_t num = pls->first_seq_no;
2289  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline start n_timelines[%d] "
2290  "last_seq_no[%"PRId64"], playlist %d.\n",
2291  (int)pls->n_timelines, (int64_t)pls->last_seq_no, (int)pls->rep_idx);
2292  for (i = 0; i < pls->n_timelines; i++) {
2293  if (pls->timelines[i]->starttime > 0) {
2294  duration = pls->timelines[i]->starttime;
2295  }
2296  duration += pls->timelines[i]->duration;
2297  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2298  goto set_seq_num;
2299  }
2300  for (j = 0; j < pls->timelines[i]->repeat; j++) {
2301  duration += pls->timelines[i]->duration;
2302  num++;
2303  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2304  goto set_seq_num;
2305  }
2306  }
2307  num++;
2308  }
2309 
2310 set_seq_num:
2311  pls->cur_seq_no = num > pls->last_seq_no ? pls->last_seq_no : num;
2312  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline end cur_seq_no[%"PRId64"], playlist %d.\n",
2313  (int64_t)pls->cur_seq_no, (int)pls->rep_idx);
2314  } else if (pls->fragment_duration > 0) {
2315  pls->cur_seq_no = pls->first_seq_no + ((seek_pos_msec * pls->fragment_timescale) / pls->fragment_duration) / 1000;
2316  } else {
2317  av_log(pls->parent, AV_LOG_ERROR, "dash_seek missing timeline or fragment_duration\n");
2318  pls->cur_seq_no = pls->first_seq_no;
2319  }
2320  pls->cur_timestamp = 0;
2321  pls->cur_seg_offset = 0;
2322  pls->init_sec_buf_read_offset = 0;
2323  ret = dry_run ? 0 : reopen_demux_for_component(s, pls);
2324 
2325  return ret;
2326 }
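
For the fragment_duration branch the target segment index is plain integer arithmetic. A worked example, assuming a 90000 Hz timescale and 3-second (270000-tick) fragments:

/* seek to 10 000 ms:
 *   first_seq_no + ((10000 * 90000) / 270000) / 1000
 * = first_seq_no + (900000000 / 270000) / 1000
 * = first_seq_no + 3333 / 1000
 * = first_seq_no + 3            -> the fragment covering 9..12 s */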
2327 
2328 static int dash_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2329 {
2330  int ret = 0, i;
2331  DASHContext *c = s->priv_data;
2332  int64_t seek_pos_msec = av_rescale_rnd(timestamp, 1000,
2333  s->streams[stream_index]->time_base.den,
2334  flags & AVSEEK_FLAG_BACKWARD ?
2335  AV_ROUND_DOWN : AV_ROUND_UP);
2336  if ((flags & AVSEEK_FLAG_BYTE) || c->is_live)
2337  return AVERROR(ENOSYS);
2338 
2339  /* Seek in discarded streams with dry_run=1 to avoid reopening them */
2340  for (i = 0; i < c->n_videos; i++) {
2341  if (!ret)
2342  ret = dash_seek(s, c->videos[i], seek_pos_msec, flags, !c->videos[i]->ctx);
2343  }
2344  for (i = 0; i < c->n_audios; i++) {
2345  if (!ret)
2346  ret = dash_seek(s, c->audios[i], seek_pos_msec, flags, !c->audios[i]->ctx);
2347  }
2348  for (i = 0; i < c->n_subtitles; i++) {
2349  if (!ret)
2350  ret = dash_seek(s, c->subtitles[i], seek_pos_msec, flags, !c->subtitles[i]->ctx);
2351  }
2352 
2353  return ret;
2354 }
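
The timestamp-to-milliseconds conversion above uses av_rescale_rnd() so that backward seeks round down and forward seeks round up. A small check of that behaviour, assuming a 1/90000 stream time base (values are illustrative):

#include <assert.h>
#include <libavutil/mathematics.h>

int main(void)
{
    /* timestamp 900001 in an assumed 1/90000 time base */
    assert(av_rescale_rnd(900001, 1000, 90000, AV_ROUND_DOWN) == 10000); /* backward seek */
    assert(av_rescale_rnd(900001, 1000, 90000, AV_ROUND_UP)   == 10001); /* forward seek  */
    return 0;
}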
2355 
2356 static int dash_probe(const AVProbeData *p)
2357 {
2358  if (!av_stristr(p->buf, "<MPD"))
2359  return 0;
2360 
2361  if (av_stristr(p->buf, "dash:profile:isoff-on-demand:2011") ||
2362  av_stristr(p->buf, "dash:profile:isoff-live:2011") ||
2363  av_stristr(p->buf, "dash:profile:isoff-live:2012") ||
2364  av_stristr(p->buf, "dash:profile:isoff-main:2011")) {
2365  return AVPROBE_SCORE_MAX;
2366  }
2367  if (av_stristr(p->buf, "dash:profile")) {
2368  return AVPROBE_SCORE_MAX;
2369  }
2370 
2371  return 0;
2372 }
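
dash_probe() only looks for an "<MPD" element and a "dash:profile" URN in the probe buffer. A minimal manifest that would score AVPROBE_SCORE_MAX, embedded as a C string for illustration; the namespace and profile values are just examples:

static const char sample_mpd[] =
    "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
    "<MPD xmlns=\"urn:mpeg:dash:schema:mpd:2011\"\n"
    "     type=\"static\"\n"
    "     profiles=\"urn:mpeg:dash:profile:isoff-on-demand:2011\">\n"
    "  <Period></Period>\n"
    "</MPD>\n";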
2373 
2374 #define OFFSET(x) offsetof(DASHContext, x)
2375 #define FLAGS AV_OPT_FLAG_DECODING_PARAM
2376 static const AVOption dash_options[] = {
2377  {"allowed_extensions", "List of file extensions that dash is allowed to access",
2378  OFFSET(allowed_extensions), AV_OPT_TYPE_STRING,
2379  {.str = "aac,m4a,m4s,m4v,mov,mp4,webm"},
2380  INT_MIN, INT_MAX, FLAGS},
2381  {NULL}
2382 };
2383 
2384 static const AVClass dash_class = {
2385  .class_name = "dash",
2386  .item_name = av_default_item_name,
2387  .option = dash_options,
2388  .version = LIBAVUTIL_VERSION_INT,
2389 };
2390 
2391 AVInputFormat ff_dash_demuxer = {
2392  .name = "dash",
2393  .long_name = NULL_IF_CONFIG_SMALL("Dynamic Adaptive Streaming over HTTP"),
2394  .priv_class = &dash_class,
2395  .priv_data_size = sizeof(DASHContext),
2396  .read_probe = dash_probe,
2397  .read_header = dash_read_header,
2398  .read_packet = dash_read_packet,
2399  .read_close = dash_close,
2400  .read_seek = dash_read_seek,
2401  .flags = AVFMT_NO_BYTE_SEEK,
2402 };