FFmpeg
dashdec.c
1 /*
2  * Dynamic Adaptive Streaming over HTTP demux
3  * Copyright (c) 2017 samsamsam@o2.pl based on HLS demux
4  * Copyright (c) 2017 Steven Liu
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 #include <libxml/parser.h>
23 #include "libavutil/intreadwrite.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/time.h"
26 #include "libavutil/parseutils.h"
27 #include "internal.h"
28 #include "avio_internal.h"
29 #include "dash.h"
30 
31 #define INITIAL_BUFFER_SIZE 32768
32 
33 struct fragment {
34  int64_t url_offset;
35  int64_t size;
36  char *url;
37 };
38 
39 /*
40  * Reference: ISO/IEC 23009-1 (DASH), 2012
41  * Section 5.3.9.6.2
42  * Table 17 — Semantics of the SegmentTimeline element
43  */
44 struct timeline {
45  /* starttime: Element or Attribute Name
46  * specifies the MPD start time, in @timescale units, at which
47  * the first Segment in the series starts relative to the beginning of the Period.
48  * The value of this attribute must be equal to or greater than the sum of the previous S
49  * element's earliest presentation time and the sum of the contiguous Segment durations.
50  * If the value of the attribute is greater than what is expressed by the previous S element,
51  * it expresses discontinuities in the timeline.
52  * If not present, the value shall be assumed to be zero for the first S element;
53  * for subsequent S elements, the value shall be assumed to be the sum of
54  * the previous S element's earliest presentation time and contiguous duration
55  * (i.e. previous S@starttime + @duration * (@repeat + 1)).
56  */
57  int64_t starttime;
58  /* repeat: Element or Attribute Name
59  * specifies the repeat count of the number of following contiguous Segments with
60  * the same duration expressed by the value of @duration. This value is zero-based
61  * (e.g. a value of three means four Segments in the contiguous series).
62  * */
63  int64_t repeat;
64  /* duration: Element or Attribute Name
65  * specifies the Segment duration, in units of the value of the @timescale.
66  * */
67  int64_t duration;
68 };
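/*
 * Worked example (illustrative values, not taken from any real MPD):
 * a SegmentTimeline of
 *     <S t="0" d="5000" r="2"/> <S d="4000"/>
 * with @timescale="1000" describes four segments: three of 5 seconds
 * starting at 0, 5000 and 10000, followed by one of 4 seconds starting
 * at 15000. Each <S> element is parsed into one struct timeline entry.
 */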
69 
70 /*
71  * Each playlist has its own demuxer. If it is currently active,
72  * it has an opened AVIOContext too, and potentially an AVPacket
73  * containing the next packet from this stream.
74  */
75 struct representation {
76  char *url_template;
77  AVIOContext pb;
78  AVIOContext *input;
79  AVFormatContext *parent;
80  AVFormatContext *ctx;
81  AVPacket pkt;
82  int rep_idx;
83  int rep_count;
84  int stream_index;
85 
86  enum AVMediaType type;
87  char id[20];
88  int bandwidth;
89  AVRational framerate;
90  AVStream *assoc_stream; /* demuxer stream associated with this representation */
91 
92  int n_fragments;
93  struct fragment **fragments; /* VOD list of fragments for this profile */
94 
95  int n_timelines;
96  struct timeline **timelines;
97 
98  int64_t first_seq_no;
99  int64_t last_seq_no;
100  int64_t start_number; /* used with a dynamic list of segments to know which segments are new */
101 
102  int64_t fragment_duration;
103  int64_t fragment_timescale;
104 
105  int64_t presentation_timeoffset;
106 
107  int64_t cur_seq_no;
108  int64_t cur_seg_offset;
109  int64_t cur_seg_size;
110  struct fragment *cur_seg;
111 
112  /* Currently active Media Initialization Section */
113  struct fragment *init_section;
114  uint8_t *init_sec_buf;
115  uint32_t init_sec_buf_size;
116  uint32_t init_sec_data_len;
117  uint32_t init_sec_buf_read_offset;
118  int64_t cur_timestamp;
119  int is_restart_needed;
120 };
121 
122 typedef struct DASHContext {
123  const AVClass *class;
124  char *base_url;
125 
126  int n_videos;
127  struct representation **videos;
128  int n_audios;
129  struct representation **audios;
130  int n_subtitles;
131  struct representation **subtitles;
132 
133  /* MediaPresentationDescription Attribute */
134  uint64_t media_presentation_duration;
135  uint64_t suggested_presentation_delay;
136  uint64_t availability_start_time;
137  uint64_t availability_end_time;
138  uint64_t publish_time;
139  uint64_t minimum_update_period;
140  uint64_t time_shift_buffer_depth;
141  uint64_t min_buffer_time;
142 
143  /* Period Attribute */
144  uint64_t period_duration;
145  uint64_t period_start;
146 
147  int is_live;
148  AVIOInterruptCB *interrupt_callback;
149  char *allowed_extensions;
150  AVDictionary *avio_opts;
151  int max_url_size;
152 
153  /* Flags for init section */
154  int is_init_section_common_video;
155  int is_init_section_common_audio;
156 
157 } DASHContext;
158 
159 static int ishttp(char *url)
160 {
161  const char *proto_name = avio_find_protocol_name(url);
162  return av_strstart(proto_name, "http", NULL);
163 }
164 
165 static int aligned(int val)
166 {
167  return ((val + 0x3F) >> 6) << 6;
168 }
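/* aligned() rounds a byte count up to the next multiple of 64, e.g.
 * aligned(1) == 64, aligned(64) == 64, aligned(100) == 128; it is used
 * below to grow max_url_size in coarse steps. */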
169 
170 static uint64_t get_current_time_in_sec(void)
171 {
172  return av_gettime() / 1000000;
173 }
174 
175 static uint64_t get_utc_date_time_insec(AVFormatContext *s, const char *datetime)
176 {
177  struct tm timeinfo;
178  int year = 0;
179  int month = 0;
180  int day = 0;
181  int hour = 0;
182  int minute = 0;
183  int ret = 0;
184  float second = 0.0;
185 
186  /* ISO-8601 date parser */
187  if (!datetime)
188  return 0;
189 
190  ret = sscanf(datetime, "%d-%d-%dT%d:%d:%fZ", &year, &month, &day, &hour, &minute, &second);
191  /* expect all 6 fields: year, month, day, hour, minute, second */
192  if (ret != 6) {
193  av_log(s, AV_LOG_WARNING, "get_utc_date_time_insec: invalid time format\n");
194  }
195  timeinfo.tm_year = year - 1900;
196  timeinfo.tm_mon = month - 1;
197  timeinfo.tm_mday = day;
198  timeinfo.tm_hour = hour;
199  timeinfo.tm_min = minute;
200  timeinfo.tm_sec = (int)second;
201 
202  return av_timegm(&timeinfo);
203 }
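/* Example: "1970-01-02T00:00:00Z" yields 86400 (seconds since the Unix
 * epoch). Fractional seconds are accepted by the scan but truncated. */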
204 
205 static uint32_t get_duration_insec(AVFormatContext *s, const char *duration)
206 {
207  /* ISO-8601 duration parser */
208  uint32_t days = 0;
209  uint32_t hours = 0;
210  uint32_t mins = 0;
211  uint32_t secs = 0;
212  int size = 0;
213  float value = 0;
214  char type = '\0';
215  const char *ptr = duration;
216 
217  while (*ptr) {
218  if (*ptr == 'P' || *ptr == 'T') {
219  ptr++;
220  continue;
221  }
222 
223  if (sscanf(ptr, "%f%c%n", &value, &type, &size) != 2) {
224  av_log(s, AV_LOG_WARNING, "get_duration_insec: invalid duration format\n");
225  return 0; /* parser error */
226  }
227  switch (type) {
228  case 'D':
229  days = (uint32_t)value;
230  break;
231  case 'H':
232  hours = (uint32_t)value;
233  break;
234  case 'M':
235  mins = (uint32_t)value;
236  break;
237  case 'S':
238  secs = (uint32_t)value;
239  break;
240  default:
241  // handle invalid type
242  break;
243  }
244  ptr += size;
245  }
246  return ((days * 24 + hours) * 60 + mins) * 60 + secs;
247 }
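/* Example: the ISO-8601 duration "PT2H30M15.5S" is parsed as
 * 2*3600 + 30*60 + 15 = 9015 seconds; fractional values are truncated
 * by the cast to uint32_t. */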
248 
249 static int64_t get_segment_start_time_based_on_timeline(struct representation *pls, int64_t cur_seq_no)
250 {
251  int64_t start_time = 0;
252  int64_t i = 0;
253  int64_t j = 0;
254  int64_t num = 0;
255 
256  if (pls->n_timelines) {
257  for (i = 0; i < pls->n_timelines; i++) {
258  if (pls->timelines[i]->starttime > 0) {
259  start_time = pls->timelines[i]->starttime;
260  }
261  if (num == cur_seq_no)
262  goto finish;
263 
264  start_time += pls->timelines[i]->duration;
265 
266  if (pls->timelines[i]->repeat == -1) {
267  start_time = pls->timelines[i]->duration * cur_seq_no;
268  goto finish;
269  }
270 
271  for (j = 0; j < pls->timelines[i]->repeat; j++) {
272  num++;
273  if (num == cur_seq_no)
274  goto finish;
275  start_time += pls->timelines[i]->duration;
276  }
277  num++;
278  }
279  }
280 finish:
281  return start_time;
282 }
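/* Example walk-through: with a single timeline entry {t=0, d=5000, r=2},
 * cur_seq_no 0, 1 and 2 return 0, 5000 and 10000 respectively. A repeat
 * of -1 (a negative @r in the MPD) is treated as an open-ended run, so
 * the start time becomes duration * cur_seq_no. */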
283 
284 static int64_t calc_next_seg_no_from_timelines(struct representation *pls, int64_t cur_time)
285 {
286  int64_t i = 0;
287  int64_t j = 0;
288  int64_t num = 0;
289  int64_t start_time = 0;
290 
291  for (i = 0; i < pls->n_timelines; i++) {
292  if (pls->timelines[i]->starttime > 0) {
293  start_time = pls->timelines[i]->starttime;
294  }
295  if (start_time > cur_time)
296  goto finish;
297 
298  start_time += pls->timelines[i]->duration;
299  for (j = 0; j < pls->timelines[i]->repeat; j++) {
300  num++;
301  if (start_time > cur_time)
302  goto finish;
303  start_time += pls->timelines[i]->duration;
304  }
305  num++;
306  }
307 
308  return -1;
309 
310 finish:
311  return num;
312 }
313 
314 static void free_fragment(struct fragment **seg)
315 {
316  if (!(*seg)) {
317  return;
318  }
319  av_freep(&(*seg)->url);
320  av_freep(seg);
321 }
322 
323 static void free_fragment_list(struct representation *pls)
324 {
325  int i;
326 
327  for (i = 0; i < pls->n_fragments; i++) {
328  free_fragment(&pls->fragments[i]);
329  }
330  av_freep(&pls->fragments);
331  pls->n_fragments = 0;
332 }
333 
334 static void free_timelines_list(struct representation *pls)
335 {
336  int i;
337 
338  for (i = 0; i < pls->n_timelines; i++) {
339  av_freep(&pls->timelines[i]);
340  }
341  av_freep(&pls->timelines);
342  pls->n_timelines = 0;
343 }
344 
345 static void free_representation(struct representation *pls)
346 {
347  free_fragment_list(pls);
348  free_timelines_list(pls);
349  free_fragment(&pls->cur_seg);
350  free_fragment(&pls->init_section);
351  av_freep(&pls->init_sec_buf);
352  av_freep(&pls->pb.buffer);
353  if (pls->input)
354  ff_format_io_close(pls->parent, &pls->input);
355  if (pls->ctx) {
356  pls->ctx->pb = NULL;
357  avformat_close_input(&pls->ctx);
358  }
359 
360  av_freep(&pls->url_template);
361  av_freep(&pls);
362 }
363 
364 static void free_video_list(DASHContext *c)
365 {
366  int i;
367  for (i = 0; i < c->n_videos; i++) {
368  struct representation *pls = c->videos[i];
369  free_representation(pls);
370  }
371  av_freep(&c->videos);
372  c->n_videos = 0;
373 }
374 
375 static void free_audio_list(DASHContext *c)
376 {
377  int i;
378  for (i = 0; i < c->n_audios; i++) {
379  struct representation *pls = c->audios[i];
380  free_representation(pls);
381  }
382  av_freep(&c->audios);
383  c->n_audios = 0;
384 }
385 
386 static void free_subtitle_list(DASHContext *c)
387 {
388  int i;
389  for (i = 0; i < c->n_subtitles; i++) {
390  struct representation *pls = c->subtitles[i];
391  free_representation(pls);
392  }
393  av_freep(&c->subtitles);
394  c->n_subtitles = 0;
395 }
396 
397 static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
398  AVDictionary *opts, AVDictionary *opts2, int *is_http)
399 {
400  DASHContext *c = s->priv_data;
401  AVDictionary *tmp = NULL;
402  const char *proto_name = NULL;
403  int ret;
404 
405  av_dict_copy(&tmp, opts, 0);
406  av_dict_copy(&tmp, opts2, 0);
407 
408  if (av_strstart(url, "crypto", NULL)) {
409  if (url[6] == '+' || url[6] == ':')
410  proto_name = avio_find_protocol_name(url + 7);
411  }
412 
413  if (!proto_name)
414  proto_name = avio_find_protocol_name(url);
415 
416  if (!proto_name)
417  return AVERROR_INVALIDDATA;
418 
419  // only http(s) & file are allowed
420  if (av_strstart(proto_name, "file", NULL)) {
421  if (strcmp(c->allowed_extensions, "ALL") && !av_match_ext(url, c->allowed_extensions)) {
423  "Filename extension of \'%s\' is not a common multimedia extension, blocked for security reasons.\n"
424  "If you wish to override this adjust allowed_extensions, you can set it to \'ALL\' to allow all\n",
425  url);
426  return AVERROR_INVALIDDATA;
427  }
428  } else if (av_strstart(proto_name, "http", NULL)) {
429  ;
430  } else
431  return AVERROR_INVALIDDATA;
432 
433  if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':')
434  ;
435  else if (av_strstart(url, "crypto", NULL) && !strncmp(proto_name, url + 7, strlen(proto_name)) && url[7 + strlen(proto_name)] == ':')
436  ;
437  else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
438  return AVERROR_INVALIDDATA;
439 
440  av_freep(pb);
441  ret = avio_open2(pb, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);
442  if (ret >= 0) {
444  // update cookies from the HTTP response's Set-Cookie headers.
444  char *new_cookies = NULL;
445 
446  if (!(s->flags & AVFMT_FLAG_CUSTOM_IO))
447  av_opt_get(*pb, "cookies", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&new_cookies);
448 
449  if (new_cookies) {
450  av_dict_set(&opts, "cookies", new_cookies, AV_DICT_DONT_STRDUP_VAL);
451  }
452 
453  }
454 
455  av_dict_free(&tmp);
456 
457  if (is_http)
458  *is_http = av_strstart(proto_name, "http", NULL);
459 
460  return ret;
461 }
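/* open_url() only permits the file protocol (subject to the
 * allowed_extensions check), http(s), and the crypto protocol as a wrapper
 * around them; any other protocol is rejected with AVERROR_INVALIDDATA
 * before any I/O is attempted. */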
462 
463 static char *get_content_url(xmlNodePtr *baseurl_nodes,
464  int n_baseurl_nodes,
465  int max_url_size,
466  char *rep_id_val,
467  char *rep_bandwidth_val,
468  char *val)
469 {
470  int i;
471  char *text;
472  char *url = NULL;
473  char *tmp_str = av_mallocz(max_url_size);
474  char *tmp_str_2 = av_mallocz(max_url_size);
475 
476  if (!tmp_str || !tmp_str_2) {
477  return NULL;
478  }
479 
480  for (i = 0; i < n_baseurl_nodes; ++i) {
481  if (baseurl_nodes[i] &&
482  baseurl_nodes[i]->children &&
483  baseurl_nodes[i]->children->type == XML_TEXT_NODE) {
484  text = xmlNodeGetContent(baseurl_nodes[i]->children);
485  if (text) {
486  memset(tmp_str, 0, max_url_size);
487  memset(tmp_str_2, 0, max_url_size);
488  ff_make_absolute_url(tmp_str_2, max_url_size, tmp_str, text);
489  av_strlcpy(tmp_str, tmp_str_2, max_url_size);
490  xmlFree(text);
491  }
492  }
493  }
494 
495  if (val)
496  ff_make_absolute_url(tmp_str, max_url_size, tmp_str, val);
497 
498  if (rep_id_val) {
499  url = av_strireplace(tmp_str, "$RepresentationID$", (const char*)rep_id_val);
500  if (!url) {
501  goto end;
502  }
503  av_strlcpy(tmp_str, url, max_url_size);
504  }
505  if (rep_bandwidth_val && tmp_str[0] != '\0') {
506  // free any previously assigned url before reassigning
507  av_free(url);
508  url = av_strireplace(tmp_str, "$Bandwidth$", (const char*)rep_bandwidth_val);
509  if (!url) {
510  goto end;
511  }
512  }
513 end:
514  av_free(tmp_str);
515  av_free(tmp_str_2);
516  return url;
517 }
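/* Example with hypothetical values: given a resolved BaseURL of
 * "https://example.com/dash/" and val "$RepresentationID$/init-$Bandwidth$.m4s",
 * a Representation with id "video_1" and bandwidth "800000" yields
 * "https://example.com/dash/video_1/init-800000.m4s". */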
518 
519 static char *get_val_from_nodes_tab(xmlNodePtr *nodes, const int n_nodes, const char *attrname)
520 {
521  int i;
522  char *val;
523 
524  for (i = 0; i < n_nodes; ++i) {
525  if (nodes[i]) {
526  val = xmlGetProp(nodes[i], attrname);
527  if (val)
528  return val;
529  }
530  }
531 
532  return NULL;
533 }
534 
535 static xmlNodePtr find_child_node_by_name(xmlNodePtr rootnode, const char *nodename)
536 {
537  xmlNodePtr node = rootnode;
538  if (!node) {
539  return NULL;
540  }
541 
542  node = xmlFirstElementChild(node);
543  while (node) {
544  if (!av_strcasecmp(node->name, nodename)) {
545  return node;
546  }
547  node = xmlNextElementSibling(node);
548  }
549  return NULL;
550 }
551 
552 static enum AVMediaType get_content_type(xmlNodePtr node)
553 {
554  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
555  int i = 0;
556  const char *attr;
557  char *val = NULL;
558 
559  if (node) {
560  for (i = 0; i < 2; i++) {
561  attr = i ? "mimeType" : "contentType";
562  val = xmlGetProp(node, attr);
563  if (val) {
564  if (av_stristr((const char *)val, "video")) {
565  type = AVMEDIA_TYPE_VIDEO;
566  } else if (av_stristr((const char *)val, "audio")) {
567  type = AVMEDIA_TYPE_AUDIO;
568  } else if (av_stristr((const char *)val, "text")) {
569  type = AVMEDIA_TYPE_SUBTITLE;
570  }
571  xmlFree(val);
572  }
573  }
574  }
575  return type;
576 }
577 
578 static struct fragment * get_Fragment(char *range)
579 {
580  struct fragment * seg = av_mallocz(sizeof(struct fragment));
581 
582  if (!seg)
583  return NULL;
584 
585  seg->size = -1;
586  if (range) {
587  char *str_end_offset;
588  char *str_offset = av_strtok(range, "-", &str_end_offset);
589  seg->url_offset = strtoll(str_offset, NULL, 10);
590  seg->size = strtoll(str_end_offset, NULL, 10) - seg->url_offset;
591  }
592 
593  return seg;
594 }
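/* Example: the range string "500-999" produces url_offset = 500 and
 * size = 999 - 500 = 499; when no range is given, size stays -1, which
 * the readers below interpret as "the whole resource". */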
595 
596 static int parse_manifest_segmenturlnode(AVFormatContext *s, struct representation *rep,
597  xmlNodePtr fragmenturl_node,
598  xmlNodePtr *baseurl_nodes,
599  char *rep_id_val,
600  char *rep_bandwidth_val)
601 {
602  DASHContext *c = s->priv_data;
603  char *initialization_val = NULL;
604  char *media_val = NULL;
605  char *range_val = NULL;
606  int max_url_size = c ? c->max_url_size: MAX_URL_SIZE;
607 
608  if (!av_strcasecmp(fragmenturl_node->name, (const char *)"Initialization")) {
609  initialization_val = xmlGetProp(fragmenturl_node, "sourceURL");
610  range_val = xmlGetProp(fragmenturl_node, "range");
611  if (initialization_val || range_val) {
612  rep->init_section = get_Fragment(range_val);
613  if (!rep->init_section) {
614  xmlFree(initialization_val);
615  xmlFree(range_val);
616  return AVERROR(ENOMEM);
617  }
618  rep->init_section->url = get_content_url(baseurl_nodes, 4,
619  max_url_size,
620  rep_id_val,
621  rep_bandwidth_val,
622  initialization_val);
623 
624  if (!rep->init_section->url) {
625  av_free(rep->init_section);
626  xmlFree(initialization_val);
627  xmlFree(range_val);
628  return AVERROR(ENOMEM);
629  }
630  xmlFree(initialization_val);
631  xmlFree(range_val);
632  }
633  } else if (!av_strcasecmp(fragmenturl_node->name, (const char *)"SegmentURL")) {
634  media_val = xmlGetProp(fragmenturl_node, "media");
635  range_val = xmlGetProp(fragmenturl_node, "mediaRange");
636  if (media_val || range_val) {
637  struct fragment *seg = get_Fragment(range_val);
638  if (!seg) {
639  xmlFree(media_val);
640  xmlFree(range_val);
641  return AVERROR(ENOMEM);
642  }
643  seg->url = get_content_url(baseurl_nodes, 4,
644  max_url_size,
645  rep_id_val,
646  rep_bandwidth_val,
647  media_val);
648  if (!seg->url) {
649  av_free(seg);
650  xmlFree(media_val);
651  xmlFree(range_val);
652  return AVERROR(ENOMEM);
653  }
654  dynarray_add(&rep->fragments, &rep->n_fragments, seg);
655  xmlFree(media_val);
656  xmlFree(range_val);
657  }
658  }
659 
660  return 0;
661 }
662 
663 static int parse_manifest_segmenttimeline(AVFormatContext *s, struct representation *rep,
664  xmlNodePtr fragment_timeline_node)
665 {
666  xmlAttrPtr attr = NULL;
667  char *val = NULL;
668 
669  if (!av_strcasecmp(fragment_timeline_node->name, (const char *)"S")) {
670  struct timeline *tml = av_mallocz(sizeof(struct timeline));
671  if (!tml) {
672  return AVERROR(ENOMEM);
673  }
674  attr = fragment_timeline_node->properties;
675  while (attr) {
676  val = xmlGetProp(fragment_timeline_node, attr->name);
677 
678  if (!val) {
679  av_log(s, AV_LOG_WARNING, "parse_manifest_segmenttimeline attr->name = %s val is NULL\n", attr->name);
680  attr = attr->next; continue;
681  }
682 
683  if (!av_strcasecmp(attr->name, (const char *)"t")) {
684  tml->starttime = (int64_t)strtoll(val, NULL, 10);
685  } else if (!av_strcasecmp(attr->name, (const char *)"r")) {
686  tml->repeat =(int64_t) strtoll(val, NULL, 10);
687  } else if (!av_strcasecmp(attr->name, (const char *)"d")) {
688  tml->duration = (int64_t)strtoll(val, NULL, 10);
689  }
690  attr = attr->next;
691  xmlFree(val);
692  }
693  dynarray_add(&rep->timelines, &rep->n_timelines, tml);
694  }
695 
696  return 0;
697 }
698 
699 static int resolve_content_path(AVFormatContext *s, const char *url, int *max_url_size, xmlNodePtr *baseurl_nodes, int n_baseurl_nodes)
700 {
701  char *tmp_str = NULL;
702  char *path = NULL;
703  char *mpdName = NULL;
704  xmlNodePtr node = NULL;
705  char *baseurl = NULL;
706  char *root_url = NULL;
707  char *text = NULL;
708  char *tmp = NULL;
709  int isRootHttp = 0;
710  char token ='/';
711  int start = 0;
712  int rootId = 0;
713  int updated = 0;
714  int size = 0;
715  int i;
716  int tmp_max_url_size = strlen(url);
717 
718  for (i = n_baseurl_nodes-1; i >= 0 ; i--) {
719  text = xmlNodeGetContent(baseurl_nodes[i]);
720  if (!text)
721  continue;
722  tmp_max_url_size += strlen(text);
723  if (ishttp(text)) {
724  xmlFree(text);
725  break;
726  }
727  xmlFree(text);
728  }
729 
730  tmp_max_url_size = aligned(tmp_max_url_size);
731  text = av_mallocz(tmp_max_url_size);
732  if (!text) {
733  updated = AVERROR(ENOMEM);
734  goto end;
735  }
736  av_strlcpy(text, url, strlen(url)+1);
737  tmp = text;
738  while (mpdName = av_strtok(tmp, "/", &tmp)) {
739  size = strlen(mpdName);
740  }
741  av_free(text);
742 
743  path = av_mallocz(tmp_max_url_size);
744  tmp_str = av_mallocz(tmp_max_url_size);
745  if (!tmp_str || !path) {
746  updated = AVERROR(ENOMEM);
747  goto end;
748  }
749 
750  av_strlcpy (path, url, strlen(url) - size + 1);
751  for (rootId = n_baseurl_nodes - 1; rootId > 0; rootId --) {
752  if (!(node = baseurl_nodes[rootId])) {
753  continue;
754  }
755  text = xmlNodeGetContent(node);
756  if (ishttp(text)) {
757  xmlFree(text);
758  break;
759  }
760  xmlFree(text);
761  }
762 
763  node = baseurl_nodes[rootId];
764  baseurl = xmlNodeGetContent(node);
765  root_url = (av_strcasecmp(baseurl, "")) ? baseurl : path;
766  if (node) {
767  xmlNodeSetContent(node, root_url);
768  updated = 1;
769  }
770 
771  size = strlen(root_url);
772  isRootHttp = ishttp(root_url);
773 
774  if (root_url[size - 1] != token) {
775  av_strlcat(root_url, "/", size + 2);
776  size += 2;
777  }
778 
779  for (i = 0; i < n_baseurl_nodes; ++i) {
780  if (i == rootId) {
781  continue;
782  }
783  text = xmlNodeGetContent(baseurl_nodes[i]);
784  if (text) {
785  memset(tmp_str, 0, strlen(tmp_str));
786  if (!ishttp(text) && isRootHttp) {
787  av_strlcpy(tmp_str, root_url, size + 1);
788  }
789  start = (text[0] == token);
790  av_strlcat(tmp_str, text + start, tmp_max_url_size);
791  xmlNodeSetContent(baseurl_nodes[i], tmp_str);
792  updated = 1;
793  xmlFree(text);
794  }
795  }
796 
797 end:
798  if (tmp_max_url_size > *max_url_size) {
799  *max_url_size = tmp_max_url_size;
800  }
801  av_free(path);
802  av_free(tmp_str);
803  xmlFree(baseurl);
804  return updated;
805 
806 }
807 
808 static int parse_manifest_representation(AVFormatContext *s, const char *url,
809  xmlNodePtr node,
810  xmlNodePtr adaptionset_node,
811  xmlNodePtr mpd_baseurl_node,
812  xmlNodePtr period_baseurl_node,
813  xmlNodePtr period_segmenttemplate_node,
814  xmlNodePtr period_segmentlist_node,
815  xmlNodePtr fragment_template_node,
816  xmlNodePtr content_component_node,
817  xmlNodePtr adaptionset_baseurl_node,
818  xmlNodePtr adaptionset_segmentlist_node,
819  xmlNodePtr adaptionset_supplementalproperty_node)
820 {
821  int32_t ret = 0;
822  int32_t subtitle_rep_idx = 0;
823  int32_t audio_rep_idx = 0;
824  int32_t video_rep_idx = 0;
825  DASHContext *c = s->priv_data;
826  struct representation *rep = NULL;
827  struct fragment *seg = NULL;
828  xmlNodePtr representation_segmenttemplate_node = NULL;
829  xmlNodePtr representation_baseurl_node = NULL;
830  xmlNodePtr representation_segmentlist_node = NULL;
831  xmlNodePtr segmentlists_tab[3];
832  xmlNodePtr fragment_timeline_node = NULL;
833  xmlNodePtr fragment_templates_tab[5];
834  char *duration_val = NULL;
835  char *presentation_timeoffset_val = NULL;
836  char *startnumber_val = NULL;
837  char *timescale_val = NULL;
838  char *initialization_val = NULL;
839  char *media_val = NULL;
840  char *val = NULL;
841  xmlNodePtr baseurl_nodes[4];
842  xmlNodePtr representation_node = node;
843  char *rep_id_val = xmlGetProp(representation_node, "id");
844  char *rep_bandwidth_val = xmlGetProp(representation_node, "bandwidth");
845  char *rep_framerate_val = xmlGetProp(representation_node, "frameRate");
846  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
847 
848  // try to get information from the Representation
849  if (type == AVMEDIA_TYPE_UNKNOWN)
850  type = get_content_type(representation_node);
851  // try to get information from the ContentComponent
852  if (type == AVMEDIA_TYPE_UNKNOWN)
853  type = get_content_type(content_component_node);
854  // try to get information from the AdaptationSet
855  if (type == AVMEDIA_TYPE_UNKNOWN)
856  type = get_content_type(adaptionset_node);
857  if (type == AVMEDIA_TYPE_UNKNOWN) {
858  av_log(s, AV_LOG_VERBOSE, "Parsing '%s' - skipping unsupported representation type\n", url);
859  } else {
860  // convert the selected representation to our internal struct
861  rep = av_mallocz(sizeof(struct representation));
862  if (!rep) {
863  ret = AVERROR(ENOMEM);
864  goto end;
865  }
866  representation_segmenttemplate_node = find_child_node_by_name(representation_node, "SegmentTemplate");
867  representation_baseurl_node = find_child_node_by_name(representation_node, "BaseURL");
868  representation_segmentlist_node = find_child_node_by_name(representation_node, "SegmentList");
869 
870  baseurl_nodes[0] = mpd_baseurl_node;
871  baseurl_nodes[1] = period_baseurl_node;
872  baseurl_nodes[2] = adaptionset_baseurl_node;
873  baseurl_nodes[3] = representation_baseurl_node;
874 
875  ret = resolve_content_path(s, url, &c->max_url_size, baseurl_nodes, 4);
876  c->max_url_size = aligned(c->max_url_size
877  + (rep_id_val ? strlen(rep_id_val) : 0)
878  + (rep_bandwidth_val ? strlen(rep_bandwidth_val) : 0));
879  if (ret == AVERROR(ENOMEM) || ret == 0) {
880  goto end;
881  }
882  if (representation_segmenttemplate_node || fragment_template_node || period_segmenttemplate_node) {
883  fragment_timeline_node = NULL;
884  fragment_templates_tab[0] = representation_segmenttemplate_node;
885  fragment_templates_tab[1] = adaptionset_segmentlist_node;
886  fragment_templates_tab[2] = fragment_template_node;
887  fragment_templates_tab[3] = period_segmenttemplate_node;
888  fragment_templates_tab[4] = period_segmentlist_node;
889 
890  presentation_timeoffset_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "presentationTimeOffset");
891  duration_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "duration");
892  startnumber_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "startNumber");
893  timescale_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "timescale");
894  initialization_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "initialization");
895  media_val = get_val_from_nodes_tab(fragment_templates_tab, 4, "media");
896 
897  if (initialization_val) {
898  rep->init_section = av_mallocz(sizeof(struct fragment));
899  if (!rep->init_section) {
900  av_free(rep);
901  ret = AVERROR(ENOMEM);
902  goto end;
903  }
904  c->max_url_size = aligned(c->max_url_size + strlen(initialization_val));
905  rep->init_section->url = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, initialization_val);
906  if (!rep->init_section->url) {
907  av_free(rep->init_section);
908  av_free(rep);
909  ret = AVERROR(ENOMEM);
910  goto end;
911  }
912  rep->init_section->size = -1;
913  xmlFree(initialization_val);
914  }
915 
916  if (media_val) {
917  c->max_url_size = aligned(c->max_url_size + strlen(media_val));
918  rep->url_template = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, media_val);
919  xmlFree(media_val);
920  }
921 
922  if (presentation_timeoffset_val) {
923  rep->presentation_timeoffset = (int64_t) strtoll(presentation_timeoffset_val, NULL, 10);
924  av_log(s, AV_LOG_TRACE, "rep->presentation_timeoffset = [%"PRId64"]\n", rep->presentation_timeoffset);
925  xmlFree(presentation_timeoffset_val);
926  }
927  if (duration_val) {
928  rep->fragment_duration = (int64_t) strtoll(duration_val, NULL, 10);
929  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
930  xmlFree(duration_val);
931  }
932  if (timescale_val) {
933  rep->fragment_timescale = (int64_t) strtoll(timescale_val, NULL, 10);
934  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
935  xmlFree(timescale_val);
936  }
937  if (startnumber_val) {
938  rep->first_seq_no = (int64_t) strtoll(startnumber_val, NULL, 10);
939  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
940  xmlFree(startnumber_val);
941  }
942  if (adaptionset_supplementalproperty_node) {
943  if (!av_strcasecmp(xmlGetProp(adaptionset_supplementalproperty_node,"schemeIdUri"), "http://dashif.org/guidelines/last-segment-number")) {
944  val = xmlGetProp(adaptionset_supplementalproperty_node,"value");
945  if (!val) {
946  av_log(s, AV_LOG_ERROR, "Missing value attribute in adaptionset_supplementalproperty_node\n");
947  } else {
948  rep->last_seq_no =(int64_t) strtoll(val, NULL, 10) - 1;
949  xmlFree(val);
950  }
951  }
952  }
953 
954  fragment_timeline_node = find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
955 
956  if (!fragment_timeline_node)
957  fragment_timeline_node = find_child_node_by_name(fragment_template_node, "SegmentTimeline");
958  if (!fragment_timeline_node)
959  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
960  if (!fragment_timeline_node)
961  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
962  if (fragment_timeline_node) {
963  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
964  while (fragment_timeline_node) {
965  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
966  if (ret < 0) {
967  return ret;
968  }
969  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
970  }
971  }
972  } else if (representation_baseurl_node && !representation_segmentlist_node) {
973  seg = av_mallocz(sizeof(struct fragment));
974  if (!seg) {
975  ret = AVERROR(ENOMEM);
976  goto end;
977  }
978  seg->url = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, NULL);
979  if (!seg->url) {
980  av_free(seg);
981  ret = AVERROR(ENOMEM);
982  goto end;
983  }
984  seg->size = -1;
985  dynarray_add(&rep->fragments, &rep->n_fragments, seg);
986  } else if (representation_segmentlist_node) {
987  // TODO: https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html
988  // http://www-itec.uni-klu.ac.at/dash/ddash/mpdGenerator.php?fragmentlength=15&type=full
989  xmlNodePtr fragmenturl_node = NULL;
990  segmentlists_tab[0] = representation_segmentlist_node;
991  segmentlists_tab[1] = adaptionset_segmentlist_node;
992  segmentlists_tab[2] = period_segmentlist_node;
993 
994  duration_val = get_val_from_nodes_tab(segmentlists_tab, 3, "duration");
995  timescale_val = get_val_from_nodes_tab(segmentlists_tab, 3, "timescale");
996  if (duration_val) {
997  rep->fragment_duration = (int64_t) strtoll(duration_val, NULL, 10);
998  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
999  xmlFree(duration_val);
1000  }
1001  if (timescale_val) {
1002  rep->fragment_timescale = (int64_t) strtoll(timescale_val, NULL, 10);
1003  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
1004  xmlFree(timescale_val);
1005  }
1006  fragmenturl_node = xmlFirstElementChild(representation_segmentlist_node);
1007  while (fragmenturl_node) {
1008  ret = parse_manifest_segmenturlnode(s, rep, fragmenturl_node,
1009  baseurl_nodes,
1010  rep_id_val,
1011  rep_bandwidth_val);
1012  if (ret < 0) {
1013  return ret;
1014  }
1015  fragmenturl_node = xmlNextElementSibling(fragmenturl_node);
1016  }
1017 
1018  fragment_timeline_node = find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
1019 
1020  if (!fragment_timeline_node)
1021  fragment_timeline_node = find_child_node_by_name(fragment_template_node, "SegmentTimeline");
1022  if (!fragment_timeline_node)
1023  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
1024  if (!fragment_timeline_node)
1025  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
1026  if (fragment_timeline_node) {
1027  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
1028  while (fragment_timeline_node) {
1029  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
1030  if (ret < 0) {
1031  return ret;
1032  }
1033  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
1034  }
1035  }
1036  } else {
1037  free_representation(rep);
1038  rep = NULL;
1039  av_log(s, AV_LOG_ERROR, "Unknown format of Representation node id[%s] \n", (const char *)rep_id_val);
1040  }
1041 
1042  if (rep) {
1043  if (rep->fragment_duration > 0 && !rep->fragment_timescale)
1044  rep->fragment_timescale = 1;
1045  rep->bandwidth = rep_bandwidth_val ? atoi(rep_bandwidth_val) : 0;
1046  strncpy(rep->id, rep_id_val ? rep_id_val : "", sizeof(rep->id));
1047  rep->framerate = av_make_q(0, 0);
1048  if (type == AVMEDIA_TYPE_VIDEO && rep_framerate_val) {
1049  ret = av_parse_video_rate(&rep->framerate, rep_framerate_val);
1050  if (ret < 0)
1051  av_log(s, AV_LOG_VERBOSE, "Ignoring invalid frame rate '%s'\n", rep_framerate_val);
1052  }
1053 
1054  switch (type) {
1055  case AVMEDIA_TYPE_VIDEO:
1056  rep->rep_idx = video_rep_idx;
1057  dynarray_add(&c->videos, &c->n_videos, rep);
1058  break;
1059  case AVMEDIA_TYPE_AUDIO:
1060  rep->rep_idx = audio_rep_idx;
1061  dynarray_add(&c->audios, &c->n_audios, rep);
1062  break;
1063  case AVMEDIA_TYPE_SUBTITLE:
1064  rep->rep_idx = subtitle_rep_idx;
1065  dynarray_add(&c->subtitles, &c->n_subtitles, rep);
1066  break;
1067  default:
1068  av_log(s, AV_LOG_WARNING, "Unsupported stream type %d\n", type);
1069  break;
1070  }
1071  }
1072  }
1073 
1074  video_rep_idx += type == AVMEDIA_TYPE_VIDEO;
1075  audio_rep_idx += type == AVMEDIA_TYPE_AUDIO;
1076  subtitle_rep_idx += type == AVMEDIA_TYPE_SUBTITLE;
1077 
1078 end:
1079  if (rep_id_val)
1080  xmlFree(rep_id_val);
1081  if (rep_bandwidth_val)
1082  xmlFree(rep_bandwidth_val);
1083  if (rep_framerate_val)
1084  xmlFree(rep_framerate_val);
1085 
1086  return ret;
1087 }
1088 
1089 static int parse_manifest_adaptationset(AVFormatContext *s, const char *url,
1090  xmlNodePtr adaptionset_node,
1091  xmlNodePtr mpd_baseurl_node,
1092  xmlNodePtr period_baseurl_node,
1093  xmlNodePtr period_segmenttemplate_node,
1094  xmlNodePtr period_segmentlist_node)
1095 {
1096  int ret = 0;
1097  xmlNodePtr fragment_template_node = NULL;
1098  xmlNodePtr content_component_node = NULL;
1099  xmlNodePtr adaptionset_baseurl_node = NULL;
1100  xmlNodePtr adaptionset_segmentlist_node = NULL;
1101  xmlNodePtr adaptionset_supplementalproperty_node = NULL;
1102  xmlNodePtr node = NULL;
1103 
1104  node = xmlFirstElementChild(adaptionset_node);
1105  while (node) {
1106  if (!av_strcasecmp(node->name, (const char *)"SegmentTemplate")) {
1107  fragment_template_node = node;
1108  } else if (!av_strcasecmp(node->name, (const char *)"ContentComponent")) {
1109  content_component_node = node;
1110  } else if (!av_strcasecmp(node->name, (const char *)"BaseURL")) {
1111  adaptionset_baseurl_node = node;
1112  } else if (!av_strcasecmp(node->name, (const char *)"SegmentList")) {
1113  adaptionset_segmentlist_node = node;
1114  } else if (!av_strcasecmp(node->name, (const char *)"SupplementalProperty")) {
1115  adaptionset_supplementalproperty_node = node;
1116  } else if (!av_strcasecmp(node->name, (const char *)"Representation")) {
1117  ret = parse_manifest_representation(s, url, node,
1118  adaptionset_node,
1119  mpd_baseurl_node,
1120  period_baseurl_node,
1121  period_segmenttemplate_node,
1122  period_segmentlist_node,
1123  fragment_template_node,
1124  content_component_node,
1125  adaptionset_baseurl_node,
1126  adaptionset_segmentlist_node,
1127  adaptionset_supplementalproperty_node);
1128  if (ret < 0) {
1129  return ret;
1130  }
1131  }
1132  node = xmlNextElementSibling(node);
1133  }
1134  return 0;
1135 }
1136 
1137 static int parse_programinformation(AVFormatContext *s, xmlNodePtr node)
1138 {
1139  xmlChar *val = NULL;
1140 
1141  node = xmlFirstElementChild(node);
1142  while (node) {
1143  if (!av_strcasecmp(node->name, "Title")) {
1144  val = xmlNodeGetContent(node);
1145  if (val) {
1146  av_dict_set(&s->metadata, "Title", val, 0);
1147  }
1148  } else if (!av_strcasecmp(node->name, "Source")) {
1149  val = xmlNodeGetContent(node);
1150  if (val) {
1151  av_dict_set(&s->metadata, "Source", val, 0);
1152  }
1153  } else if (!av_strcasecmp(node->name, "Copyright")) {
1154  val = xmlNodeGetContent(node);
1155  if (val) {
1156  av_dict_set(&s->metadata, "Copyright", val, 0);
1157  }
1158  }
1159  node = xmlNextElementSibling(node);
1160  xmlFree(val);
1161  }
1162  return 0;
1163 }
1164 
1165 static int parse_manifest(AVFormatContext *s, const char *url, AVIOContext *in)
1166 {
1167  DASHContext *c = s->priv_data;
1168  int ret = 0;
1169  int close_in = 0;
1170  uint8_t *new_url = NULL;
1171  int64_t filesize = 0;
1172  char *buffer = NULL;
1173  AVDictionary *opts = NULL;
1174  xmlDoc *doc = NULL;
1175  xmlNodePtr root_element = NULL;
1176  xmlNodePtr node = NULL;
1177  xmlNodePtr period_node = NULL;
1178  xmlNodePtr tmp_node = NULL;
1179  xmlNodePtr mpd_baseurl_node = NULL;
1180  xmlNodePtr period_baseurl_node = NULL;
1181  xmlNodePtr period_segmenttemplate_node = NULL;
1182  xmlNodePtr period_segmentlist_node = NULL;
1183  xmlNodePtr adaptionset_node = NULL;
1184  xmlAttrPtr attr = NULL;
1185  char *val = NULL;
1186  uint32_t period_duration_sec = 0;
1187  uint32_t period_start_sec = 0;
1188 
1189  if (!in) {
1190  close_in = 1;
1191 
1192  av_dict_copy(&opts, c->avio_opts, 0);
1193  ret = avio_open2(&in, url, AVIO_FLAG_READ, c->interrupt_callback, &opts);
1194  av_dict_free(&opts);
1195  if (ret < 0)
1196  return ret;
1197  }
1198 
1199  if (av_opt_get(in, "location", AV_OPT_SEARCH_CHILDREN, &new_url) >= 0) {
1200  c->base_url = av_strdup(new_url);
1201  } else {
1202  c->base_url = av_strdup(url);
1203  }
1204 
1205  filesize = avio_size(in);
1206  if (filesize <= 0) {
1207  filesize = 8 * 1024;
1208  }
1209 
1210  buffer = av_mallocz(filesize);
1211  if (!buffer) {
1212  av_free(c->base_url);
1213  return AVERROR(ENOMEM);
1214  }
1215 
1216  filesize = avio_read(in, buffer, filesize);
1217  if (filesize <= 0) {
1218  av_log(s, AV_LOG_ERROR, "Unable to read '%s'\n", url);
1219  ret = AVERROR_INVALIDDATA;
1220  } else {
1221  LIBXML_TEST_VERSION
1222 
1223  doc = xmlReadMemory(buffer, filesize, c->base_url, NULL, 0);
1224  root_element = xmlDocGetRootElement(doc);
1225  node = root_element;
1226 
1227  if (!node) {
1228  ret = AVERROR_INVALIDDATA;
1229  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing root node\n", url);
1230  goto cleanup;
1231  }
1232 
1233  if (node->type != XML_ELEMENT_NODE ||
1234  av_strcasecmp(node->name, (const char *)"MPD")) {
1235  ret = AVERROR_INVALIDDATA;
1236  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - wrong root node name[%s] type[%d]\n", url, node->name, (int)node->type);
1237  goto cleanup;
1238  }
1239 
1240  val = xmlGetProp(node, "type");
1241  if (!val) {
1242  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing type attrib\n", url);
1243  ret = AVERROR_INVALIDDATA;
1244  goto cleanup;
1245  }
1246  if (!av_strcasecmp(val, (const char *)"dynamic"))
1247  c->is_live = 1;
1248  xmlFree(val);
1249 
1250  attr = node->properties;
1251  while (attr) {
1252  val = xmlGetProp(node, attr->name);
1253 
1254  if (!av_strcasecmp(attr->name, (const char *)"availabilityStartTime")) {
1255  c->availability_start_time = get_utc_date_time_insec(s, (const char *)val);
1256  av_log(s, AV_LOG_TRACE, "c->availability_start_time = [%"PRId64"]\n", c->availability_start_time);
1257  } else if (!av_strcasecmp(attr->name, (const char *)"availabilityEndTime")) {
1258  c->availability_end_time = get_utc_date_time_insec(s, (const char *)val);
1259  av_log(s, AV_LOG_TRACE, "c->availability_end_time = [%"PRId64"]\n", c->availability_end_time);
1260  } else if (!av_strcasecmp(attr->name, (const char *)"publishTime")) {
1261  c->publish_time = get_utc_date_time_insec(s, (const char *)val);
1262  av_log(s, AV_LOG_TRACE, "c->publish_time = [%"PRId64"]\n", c->publish_time);
1263  } else if (!av_strcasecmp(attr->name, (const char *)"minimumUpdatePeriod")) {
1264  c->minimum_update_period = get_duration_insec(s, (const char *)val);
1265  av_log(s, AV_LOG_TRACE, "c->minimum_update_period = [%"PRId64"]\n", c->minimum_update_period);
1266  } else if (!av_strcasecmp(attr->name, (const char *)"timeShiftBufferDepth")) {
1267  c->time_shift_buffer_depth = get_duration_insec(s, (const char *)val);
1268  av_log(s, AV_LOG_TRACE, "c->time_shift_buffer_depth = [%"PRId64"]\n", c->time_shift_buffer_depth);
1269  } else if (!av_strcasecmp(attr->name, (const char *)"minBufferTime")) {
1270  c->min_buffer_time = get_duration_insec(s, (const char *)val);
1271  av_log(s, AV_LOG_TRACE, "c->min_buffer_time = [%"PRId64"]\n", c->min_buffer_time);
1272  } else if (!av_strcasecmp(attr->name, (const char *)"suggestedPresentationDelay")) {
1273  c->suggested_presentation_delay = get_duration_insec(s, (const char *)val);
1274  av_log(s, AV_LOG_TRACE, "c->suggested_presentation_delay = [%"PRId64"]\n", c->suggested_presentation_delay);
1275  } else if (!av_strcasecmp(attr->name, (const char *)"mediaPresentationDuration")) {
1276  c->media_presentation_duration = get_duration_insec(s, (const char *)val);
1277  av_log(s, AV_LOG_TRACE, "c->media_presentation_duration = [%"PRId64"]\n", c->media_presentation_duration);
1278  }
1279  attr = attr->next;
1280  xmlFree(val);
1281  }
1282 
1283  tmp_node = find_child_node_by_name(node, "BaseURL");
1284  if (tmp_node) {
1285  mpd_baseurl_node = xmlCopyNode(tmp_node,1);
1286  } else {
1287  mpd_baseurl_node = xmlNewNode(NULL, "BaseURL");
1288  }
1289 
1290  // for now we can handle only one Period, the one with the longest duration
1291  node = xmlFirstElementChild(node);
1292  while (node) {
1293  if (!av_strcasecmp(node->name, (const char *)"Period")) {
1294  period_duration_sec = 0;
1295  period_start_sec = 0;
1296  attr = node->properties;
1297  while (attr) {
1298  val = xmlGetProp(node, attr->name);
1299  if (!av_strcasecmp(attr->name, (const char *)"duration")) {
1300  period_duration_sec = get_duration_insec(s, (const char *)val);
1301  } else if (!av_strcasecmp(attr->name, (const char *)"start")) {
1302  period_start_sec = get_duration_insec(s, (const char *)val);
1303  }
1304  attr = attr->next;
1305  xmlFree(val);
1306  }
1307  if ((period_duration_sec) >= (c->period_duration)) {
1308  period_node = node;
1309  c->period_duration = period_duration_sec;
1310  c->period_start = period_start_sec;
1311  if (c->period_start > 0)
1312  c->media_presentation_duration = c->period_duration;
1313  }
1314  } else if (!av_strcasecmp(node->name, "ProgramInformation")) {
1315  parse_programinformation(s, node);
1316  }
1317  node = xmlNextElementSibling(node);
1318  }
1319  if (!period_node) {
1320  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing Period node\n", url);
1321  ret = AVERROR_INVALIDDATA;
1322  goto cleanup;
1323  }
1324 
1325  adaptionset_node = xmlFirstElementChild(period_node);
1326  while (adaptionset_node) {
1327  if (!av_strcasecmp(adaptionset_node->name, (const char *)"BaseURL")) {
1328  period_baseurl_node = adaptionset_node;
1329  } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"SegmentTemplate")) {
1330  period_segmenttemplate_node = adaptionset_node;
1331  } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"SegmentList")) {
1332  period_segmentlist_node = adaptionset_node;
1333  } else if (!av_strcasecmp(adaptionset_node->name, (const char *)"AdaptationSet")) {
1334  parse_manifest_adaptationset(s, url, adaptionset_node, mpd_baseurl_node, period_baseurl_node, period_segmenttemplate_node, period_segmentlist_node);
1335  }
1336  adaptionset_node = xmlNextElementSibling(adaptionset_node);
1337  }
1338 cleanup:
1339  /*free the document */
1340  xmlFreeDoc(doc);
1341  xmlCleanupParser();
1342  xmlFreeNode(mpd_baseurl_node);
1343  }
1344 
1345  av_free(new_url);
1346  av_free(buffer);
1347  if (close_in) {
1348  avio_close(in);
1349  }
1350  return ret;
1351 }
1352 
1353 static int64_t calc_cur_seg_no(AVFormatContext *s, struct representation *pls)
1354 {
1355  DASHContext *c = s->priv_data;
1356  int64_t num = 0;
1357  int64_t start_time_offset = 0;
1358 
1359  if (c->is_live) {
1360  if (pls->n_fragments) {
1361  av_log(s, AV_LOG_TRACE, "in n_fragments mode\n");
1362  num = pls->first_seq_no;
1363  } else if (pls->n_timelines) {
1364  av_log(s, AV_LOG_TRACE, "in n_timelines mode\n");
1365  start_time_offset = get_segment_start_time_based_on_timeline(pls, 0xFFFFFFFF) - 60 * pls->fragment_timescale; // 60 seconds before end
1366  num = calc_next_seg_no_from_timelines(pls, start_time_offset);
1367  if (num == -1)
1368  num = pls->first_seq_no;
1369  else
1370  num += pls->first_seq_no;
1371  } else if (pls->fragment_duration){
1372  av_log(s, AV_LOG_TRACE, "in fragment_duration mode fragment_timescale = %"PRId64", presentation_timeoffset = %"PRId64"\n", pls->fragment_timescale, pls->presentation_timeoffset);
1373  if (pls->presentation_timeoffset) {
1374  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) * pls->fragment_timescale)-pls->presentation_timeoffset) / pls->fragment_duration - c->min_buffer_time;
1375  } else if (c->publish_time > 0 && !c->availability_start_time) {
1376  if (c->min_buffer_time) {
1377  num = pls->first_seq_no + (((c->publish_time + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration - c->min_buffer_time;
1378  } else {
1379  num = pls->first_seq_no + (((c->publish_time - c->time_shift_buffer_depth + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1380  }
1381  } else {
1382  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1383  }
1384  }
1385  } else {
1386  num = pls->first_seq_no;
1387  }
1388  return num;
1389 }
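/* For live streams the starting segment number is estimated from the wall
 * clock: roughly (now - availabilityStartTime - suggestedPresentationDelay)
 * divided by the segment duration, while SegmentTimeline-based streams
 * instead start about 60 seconds before the end of the published timeline. */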
1390 
1391 static int64_t calc_min_seg_no(AVFormatContext *s, struct representation *pls)
1392 {
1393  DASHContext *c = s->priv_data;
1394  int64_t num = 0;
1395 
1396  if (c->is_live && pls->fragment_duration) {
1397  av_log(s, AV_LOG_TRACE, "in live mode\n");
1398  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->time_shift_buffer_depth) * pls->fragment_timescale) / pls->fragment_duration;
1399  } else {
1400  num = pls->first_seq_no;
1401  }
1402  return num;
1403 }
1404 
1405 static int64_t calc_max_seg_no(struct representation *pls, DASHContext *c)
1406 {
1407  int64_t num = 0;
1408 
1409  if (pls->n_fragments) {
1410  num = pls->first_seq_no + pls->n_fragments - 1;
1411  } else if (pls->n_timelines) {
1412  int i = 0;
1413  num = pls->first_seq_no + pls->n_timelines - 1;
1414  for (i = 0; i < pls->n_timelines; i++) {
1415  if (pls->timelines[i]->repeat == -1) {
1416  int length_of_each_segment = pls->timelines[i]->duration / pls->fragment_timescale;
1417  num = c->period_duration / length_of_each_segment;
1418  } else {
1419  num += pls->timelines[i]->repeat;
1420  }
1421  }
1422  } else if (c->is_live && pls->fragment_duration) {
1423  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time)) * pls->fragment_timescale) / pls->fragment_duration;
1424  } else if (pls->fragment_duration) {
1425  num = pls->first_seq_no + (c->media_presentation_duration * pls->fragment_timescale) / pls->fragment_duration;
1426  }
1427 
1428  return num;
1429 }
1430 
1431 static void move_timelines(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1432 {
1433  if (rep_dest && rep_src ) {
1434  free_timelines_list(rep_dest);
1435  rep_dest->timelines = rep_src->timelines;
1436  rep_dest->n_timelines = rep_src->n_timelines;
1437  rep_dest->first_seq_no = rep_src->first_seq_no;
1438  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1439  rep_src->timelines = NULL;
1440  rep_src->n_timelines = 0;
1441  rep_dest->cur_seq_no = rep_src->cur_seq_no;
1442  }
1443 }
1444 
1445 static void move_segments(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1446 {
1447  if (rep_dest && rep_src ) {
1448  free_fragment_list(rep_dest);
1449  if (rep_src->start_number > (rep_dest->start_number + rep_dest->n_fragments))
1450  rep_dest->cur_seq_no = 0;
1451  else
1452  rep_dest->cur_seq_no += rep_src->start_number - rep_dest->start_number;
1453  rep_dest->fragments = rep_src->fragments;
1454  rep_dest->n_fragments = rep_src->n_fragments;
1455  rep_dest->parent = rep_src->parent;
1456  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1457  rep_src->fragments = NULL;
1458  rep_src->n_fragments = 0;
1459  }
1460 }
1461 
1462 
1463 static int refresh_manifest(AVFormatContext *s)
1464 {
1465  int ret = 0, i;
1466  DASHContext *c = s->priv_data;
1467  // save current context
1468  int n_videos = c->n_videos;
1469  struct representation **videos = c->videos;
1470  int n_audios = c->n_audios;
1471  struct representation **audios = c->audios;
1472  int n_subtitles = c->n_subtitles;
1473  struct representation **subtitles = c->subtitles;
1474  char *base_url = c->base_url;
1475 
1476  c->base_url = NULL;
1477  c->n_videos = 0;
1478  c->videos = NULL;
1479  c->n_audios = 0;
1480  c->audios = NULL;
1481  c->n_subtitles = 0;
1482  c->subtitles = NULL;
1483  ret = parse_manifest(s, s->url, NULL);
1484  if (ret)
1485  goto finish;
1486 
1487  if (c->n_videos != n_videos) {
1489  "new manifest has mismatched no. of video representations, %d -> %d\n",
1490  n_videos, c->n_videos);
1491  return AVERROR_INVALIDDATA;
1492  }
1493  if (c->n_audios != n_audios) {
1495  "new manifest has mismatched no. of audio representations, %d -> %d\n",
1496  n_audios, c->n_audios);
1497  return AVERROR_INVALIDDATA;
1498  }
1499  if (c->n_subtitles != n_subtitles) {
1501  "new manifest has mismatched no. of subtitles representations, %d -> %d\n",
1502  n_subtitles, c->n_subtitles);
1503  return AVERROR_INVALIDDATA;
1504  }
1505 
1506  for (i = 0; i < n_videos; i++) {
1507  struct representation *cur_video = videos[i];
1508  struct representation *ccur_video = c->videos[i];
1509  if (cur_video->timelines) {
1510  // calc current time
1511  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_video, cur_video->cur_seq_no) / cur_video->fragment_timescale;
1512  // update segments
1513  ccur_video->cur_seq_no = calc_next_seg_no_from_timelines(ccur_video, currentTime * cur_video->fragment_timescale - 1);
1514  if (ccur_video->cur_seq_no >= 0) {
1515  move_timelines(ccur_video, cur_video, c);
1516  }
1517  }
1518  if (cur_video->fragments) {
1519  move_segments(ccur_video, cur_video, c);
1520  }
1521  }
1522  for (i = 0; i < n_audios; i++) {
1523  struct representation *cur_audio = audios[i];
1524  struct representation *ccur_audio = c->audios[i];
1525  if (cur_audio->timelines) {
1526  // calc current time
1527  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_audio, cur_audio->cur_seq_no) / cur_audio->fragment_timescale;
1528  // update segments
1529  ccur_audio->cur_seq_no = calc_next_seg_no_from_timelines(ccur_audio, currentTime * cur_audio->fragment_timescale - 1);
1530  if (ccur_audio->cur_seq_no >= 0) {
1531  move_timelines(ccur_audio, cur_audio, c);
1532  }
1533  }
1534  if (cur_audio->fragments) {
1535  move_segments(ccur_audio, cur_audio, c);
1536  }
1537  }
1538 
1539 finish:
1540  // restore context
1541  if (c->base_url)
1542  av_free(base_url);
1543  else
1544  c->base_url = base_url;
1545 
1546  if (c->subtitles)
1547  free_subtitle_list(c);
1548  if (c->audios)
1549  free_audio_list(c);
1550  if (c->videos)
1551  free_video_list(c);
1552 
1553  c->n_subtitles = n_subtitles;
1554  c->subtitles = subtitles;
1555  c->n_audios = n_audios;
1556  c->audios = audios;
1557  c->n_videos = n_videos;
1558  c->videos = videos;
1559  return ret;
1560 }
1561 
1562 static struct fragment *get_current_fragment(struct representation *pls)
1563 {
1564  int64_t min_seq_no = 0;
1565  int64_t max_seq_no = 0;
1566  struct fragment *seg = NULL;
1567  struct fragment *seg_ptr = NULL;
1568  DASHContext *c = pls->parent->priv_data;
1569 
1570  while (( !ff_check_interrupt(c->interrupt_callback)&& pls->n_fragments > 0)) {
1571  if (pls->cur_seq_no < pls->n_fragments) {
1572  seg_ptr = pls->fragments[pls->cur_seq_no];
1573  seg = av_mallocz(sizeof(struct fragment));
1574  if (!seg) {
1575  return NULL;
1576  }
1577  seg->url = av_strdup(seg_ptr->url);
1578  if (!seg->url) {
1579  av_free(seg);
1580  return NULL;
1581  }
1582  seg->size = seg_ptr->size;
1583  seg->url_offset = seg_ptr->url_offset;
1584  return seg;
1585  } else if (c->is_live) {
1586  refresh_manifest(pls->parent);
1587  } else {
1588  break;
1589  }
1590  }
1591  if (c->is_live) {
1592  min_seq_no = calc_min_seg_no(pls->parent, pls);
1593  max_seq_no = calc_max_seg_no(pls, c);
1594 
1595  if (pls->timelines || pls->fragments) {
1596  refresh_manifest(pls->parent);
1597  }
1598  if (pls->cur_seq_no <= min_seq_no) {
1599  av_log(pls->parent, AV_LOG_VERBOSE, "old fragment: cur[%"PRId64"] min[%"PRId64"] max[%"PRId64"], playlist %d\n", (int64_t)pls->cur_seq_no, min_seq_no, max_seq_no, (int)pls->rep_idx);
1600  pls->cur_seq_no = calc_cur_seg_no(pls->parent, pls);
1601  } else if (pls->cur_seq_no > max_seq_no) {
1602  av_log(pls->parent, AV_LOG_VERBOSE, "new fragment: min[%"PRId64"] max[%"PRId64"], playlist %d\n", min_seq_no, max_seq_no, (int)pls->rep_idx);
1603  }
1604  seg = av_mallocz(sizeof(struct fragment));
1605  if (!seg) {
1606  return NULL;
1607  }
1608  } else if (pls->cur_seq_no <= pls->last_seq_no) {
1609  seg = av_mallocz(sizeof(struct fragment));
1610  if (!seg) {
1611  return NULL;
1612  }
1613  }
1614  if (seg) {
1615  char *tmpfilename= av_mallocz(c->max_url_size);
1616  if (!tmpfilename) {
1617  return NULL;
1618  }
1619  ff_dash_fill_tmpl_params(tmpfilename, c->max_url_size, pls->url_template, 0, pls->cur_seq_no, 0, get_segment_start_time_based_on_timeline(pls, pls->cur_seq_no));
1620  seg->url = av_strireplace(pls->url_template, pls->url_template, tmpfilename);
1621  if (!seg->url) {
1622  av_log(pls->parent, AV_LOG_WARNING, "Unable to resolve template url '%s', falling back to the original template\n", pls->url_template);
1623  seg->url = av_strdup(pls->url_template);
1624  if (!seg->url) {
1625  av_log(pls->parent, AV_LOG_ERROR, "Cannot resolve template url '%s'\n", pls->url_template);
1626  av_free(tmpfilename);
1627  return NULL;
1628  }
1629  }
1630  av_free(tmpfilename);
1631  seg->size = -1;
1632  }
1633 
1634  return seg;
1635 }
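/* For live/template playlists, the $...$ placeholders left in url_template
 * (such as $Number$ and $Time$) are expanded by ff_dash_fill_tmpl_params()
 * using the current sequence number and the timeline start time computed
 * above; for VOD content the fragment list parsed from the MPD is used
 * directly. */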
1636 
1637 static int read_from_url(struct representation *pls, struct fragment *seg,
1638  uint8_t *buf, int buf_size)
1639 {
1640  int ret;
1641 
1642  /* limit read if the fragment was only a part of a file */
1643  if (seg->size >= 0)
1644  buf_size = FFMIN(buf_size, pls->cur_seg_size - pls->cur_seg_offset);
1645 
1646  ret = avio_read(pls->input, buf, buf_size);
1647  if (ret > 0)
1648  pls->cur_seg_offset += ret;
1649 
1650  return ret;
1651 }
1652 
1653 static int open_input(DASHContext *c, struct representation *pls, struct fragment *seg)
1654 {
1655  AVDictionary *opts = NULL;
1656  char *url = NULL;
1657  int ret = 0;
1658 
1659  url = av_mallocz(c->max_url_size);
1660  if (!url) {
1661  ret = AVERROR(ENOMEM);
1662  goto cleanup;
1663  }
1664 
1665  if (seg->size >= 0) {
1666  /* try to restrict the HTTP request to the part we want
1667  * (if this is in fact an HTTP request) */
1668  av_dict_set_int(&opts, "offset", seg->url_offset, 0);
1669  av_dict_set_int(&opts, "end_offset", seg->url_offset + seg->size, 0);
1670  }
1671 
1672  ff_make_absolute_url(url, c->max_url_size, c->base_url, seg->url);
1673  av_log(pls->parent, AV_LOG_VERBOSE, "DASH request for url '%s', offset %"PRId64", playlist %d\n",
1674  url, seg->url_offset, pls->rep_idx);
1675  ret = open_url(pls->parent, &pls->input, url, c->avio_opts, opts, NULL);
1676 
1677 cleanup:
1678  av_free(url);
1679  av_dict_free(&opts);
1680  pls->cur_seg_offset = 0;
1681  pls->cur_seg_size = seg->size;
1682  return ret;
1683 }
1684 
1685 static int update_init_section(struct representation *pls)
1686 {
1687  static const int max_init_section_size = 1024 * 1024;
1688  DASHContext *c = pls->parent->priv_data;
1689  int64_t sec_size;
1690  int64_t urlsize;
1691  int ret;
1692 
1693  if (!pls->init_section || pls->init_sec_buf)
1694  return 0;
1695 
1696  ret = open_input(c, pls, pls->init_section);
1697  if (ret < 0) {
1699  "Failed to open an initialization section in playlist %d\n",
1700  pls->rep_idx);
1701  return ret;
1702  }
1703 
1704  if (pls->init_section->size >= 0)
1705  sec_size = pls->init_section->size;
1706  else if ((urlsize = avio_size(pls->input)) >= 0)
1707  sec_size = urlsize;
1708  else
1709  sec_size = max_init_section_size;
1710 
1711  av_log(pls->parent, AV_LOG_DEBUG,
1712  "Downloading an initialization section of size %"PRId64"\n",
1713  sec_size);
1714 
1715  sec_size = FFMIN(sec_size, max_init_section_size);
1716 
1717  av_fast_malloc(&pls->init_sec_buf, &pls->init_sec_buf_size, sec_size);
1718 
1719  ret = read_from_url(pls, pls->init_section, pls->init_sec_buf,
1720  pls->init_sec_buf_size);
1721  ff_format_io_close(pls->parent, &pls->input);
1722 
1723  if (ret < 0)
1724  return ret;
1725 
1726  pls->init_sec_data_len = ret;
1727  pls->init_sec_buf_read_offset = 0;
1728 
1729  return 0;
1730 }
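/* seek_data() and read_data() below are the custom AVIO callbacks installed
 * by reopen_demux_for_component(); they feed the cached initialization
 * section to the nested demuxer before the media segments, fetching new
 * fragments on demand. */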
1731 
1732 static int64_t seek_data(void *opaque, int64_t offset, int whence)
1733 {
1734  struct representation *v = opaque;
1735  if (v->n_fragments && !v->init_sec_data_len) {
1736  return avio_seek(v->input, offset, whence);
1737  }
1738 
1739  return AVERROR(ENOSYS);
1740 }
1741 
1742 static int read_data(void *opaque, uint8_t *buf, int buf_size)
1743 {
1744  int ret = 0;
1745  struct representation *v = opaque;
1746  DASHContext *c = v->parent->priv_data;
1747 
1748 restart:
1749  if (!v->input) {
1750  free_fragment(&v->cur_seg);
1751  v->cur_seg = get_current_fragment(v);
1752  if (!v->cur_seg) {
1753  ret = AVERROR_EOF;
1754  goto end;
1755  }
1756 
1757  /* load/update Media Initialization Section, if any */
1758  ret = update_init_section(v);
1759  if (ret)
1760  goto end;
1761 
1762  ret = open_input(c, v, v->cur_seg);
1763  if (ret < 0) {
1764  if (ff_check_interrupt(c->interrupt_callback)) {
1765  ret = AVERROR_EXIT;
1766  goto end;
1767  }
1768  av_log(v->parent, AV_LOG_WARNING, "Failed to open fragment of playlist %d\n", v->rep_idx);
1769  v->cur_seq_no++;
1770  goto restart;
1771  }
1772  }
1773 
1774  if (v->init_sec_buf_read_offset < v->init_sec_data_len) {
1775  /* Push init section out first before first actual fragment */
1776  int copy_size = FFMIN(v->init_sec_data_len - v->init_sec_buf_read_offset, buf_size);
1777  memcpy(buf, v->init_sec_buf, copy_size);
1778  v->init_sec_buf_read_offset += copy_size;
1779  ret = copy_size;
1780  goto end;
1781  }
1782 
1783  /* if v->cur_seg is NULL, fetch the current fragment and check the new v->cur_seg again */
1784  if (!v->cur_seg) {
1785  v->cur_seg = get_current_fragment(v);
1786  }
1787  if (!v->cur_seg) {
1788  ret = AVERROR_EOF;
1789  goto end;
1790  }
1791  ret = read_from_url(v, v->cur_seg, buf, buf_size);
1792  if (ret > 0)
1793  goto end;
1794 
1795  if (c->is_live || v->cur_seq_no < v->last_seq_no) {
1796  if (!v->is_restart_needed)
1797  v->cur_seq_no++;
1798  v->is_restart_needed = 1;
1799  }
1800 
1801 end:
1802  return ret;
1803 }
1804 
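/*
 * Illustrative sketch (not part of dashdec.c): read_data()/seek_data() above
 * follow the usual custom-AVIO callback pattern. reopen_demux_for_component()
 * below wires them up with the internal ffio_init_context(); with the public
 * API the equivalent wiring would look roughly like this:
 *
 *     unsigned char *iobuf = av_malloc(INITIAL_BUFFER_SIZE);
 *     AVIOContext *pb = avio_alloc_context(iobuf, INITIAL_BUFFER_SIZE,
 *                                          0,          // read-only
 *                                          pls,        // opaque: the representation
 *                                          read_data,  // pulls bytes from fragments
 *                                          NULL,       // no write callback
 *                                          seek_data); // byte seeks, VOD lists only
 *     AVFormatContext *ctx = avformat_alloc_context();
 *     ctx->pb     = pb;
 *     ctx->flags |= AVFMT_FLAG_CUSTOM_IO;
 *     avformat_open_input(&ctx, "", NULL, NULL);
 */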
1805 static int save_avio_options(AVFormatContext *s)
1806 {
1807  DASHContext *c = s->priv_data;
1808  const char *opts[] = {
1809  "headers", "user_agent", "cookies", "http_proxy", "referer", "rw_timeout", NULL };
1810  const char **opt = opts;
1811  uint8_t *buf = NULL;
1812  int ret = 0;
1813 
1814  while (*opt) {
1815  if (av_opt_get(s->pb, *opt, AV_OPT_SEARCH_CHILDREN, &buf) >= 0) {
1816  if (buf[0] != '\0') {
1817  ret = av_dict_set(&c->avio_opts, *opt, buf, AV_DICT_DONT_STRDUP_VAL);
1818  if (ret < 0) {
1819  av_freep(&buf);
1820  return ret;
1821  }
1822  } else {
1823  av_freep(&buf);
1824  }
1825  }
1826  opt++;
1827  }
1828 
1829  return ret;
1830 }
1831 
1832 static int nested_io_open(AVFormatContext *s, AVIOContext **pb, const char *url,
1833  int flags, AVDictionary **opts)
1834 {
1835  av_log(s, AV_LOG_ERROR,
1836  "A DASH playlist item '%s' referred to an external file '%s'. "
1837  "Opening this file was forbidden for security reasons\n",
1838  s->url, url);
1839  return AVERROR(EPERM);
1840 }
1841 
1842 static void close_demux_for_component(struct representation *pls)
1843 {
1844  /* note: the internal buffer could have changed */
1845  av_freep(&pls->pb.buffer);
1846  memset(&pls->pb, 0x00, sizeof(AVIOContext));
1847  pls->ctx->pb = NULL;
1848  avformat_close_input(&pls->ctx);
1849  pls->ctx = NULL;
1850 }
1851 
1852 static int reopen_demux_for_component(AVFormatContext *s, struct representation *pls)
1853 {
1854  DASHContext *c = s->priv_data;
1855  ff_const59 AVInputFormat *in_fmt = NULL;
1856  AVDictionary *in_fmt_opts = NULL;
1857  uint8_t *avio_ctx_buffer = NULL;
1858  int ret = 0, i;
1859 
1860  if (pls->ctx) {
1861  close_demux_for_component(pls);
1862  }
1863 
1864  if (ff_check_interrupt(&s->interrupt_callback)) {
1865  ret = AVERROR_EXIT;
1866  goto fail;
1867  }
1868 
1869  if (!(pls->ctx = avformat_alloc_context())) {
1870  ret = AVERROR(ENOMEM);
1871  goto fail;
1872  }
1873 
1874  avio_ctx_buffer = av_malloc(INITIAL_BUFFER_SIZE);
1875  if (!avio_ctx_buffer ) {
1876  ret = AVERROR(ENOMEM);
1877  avformat_free_context(pls->ctx);
1878  pls->ctx = NULL;
1879  goto fail;
1880  }
1881  if (c->is_live) {
1882  ffio_init_context(&pls->pb, avio_ctx_buffer , INITIAL_BUFFER_SIZE, 0, pls, read_data, NULL, NULL);
1883  } else {
1884  ffio_init_context(&pls->pb, avio_ctx_buffer , INITIAL_BUFFER_SIZE, 0, pls, read_data, NULL, seek_data);
1885  }
1886  pls->pb.seekable = 0;
1887 
1888  if ((ret = ff_copy_whiteblacklists(pls->ctx, s)) < 0)
1889  goto fail;
1890 
1891  pls->ctx->flags = AVFMT_FLAG_CUSTOM_IO;
1892  pls->ctx->probesize = 1024 * 4;
1893  pls->ctx->max_analyze_duration = 4 * AV_TIME_BASE;
1894  ret = av_probe_input_buffer(&pls->pb, &in_fmt, "", NULL, 0, 0);
1895  if (ret < 0) {
1896  av_log(s, AV_LOG_ERROR, "Error when loading first fragment, playlist %d\n", (int)pls->rep_idx);
1897  avformat_free_context(pls->ctx);
1898  pls->ctx = NULL;
1899  goto fail;
1900  }
1901 
1902  pls->ctx->pb = &pls->pb;
1903  pls->ctx->io_open = nested_io_open;
1904 
1905  // provide additional information from mpd if available
1906  ret = avformat_open_input(&pls->ctx, "", in_fmt, &in_fmt_opts); //pls->init_section->url
1907  av_dict_free(&in_fmt_opts);
1908  if (ret < 0)
1909  goto fail;
1910  if (pls->n_fragments) {
1911 #if FF_API_R_FRAME_RATE
1912  if (pls->framerate.den) {
1913  for (i = 0; i < pls->ctx->nb_streams; i++)
1914  pls->ctx->streams[i]->r_frame_rate = pls->framerate;
1915  }
1916 #endif
1917  ret = avformat_find_stream_info(pls->ctx, NULL);
1918  if (ret < 0)
1919  goto fail;
1920  }
1921 
1922 fail:
1923  return ret;
1924 }
1925 
1926 static int open_demux_for_component(AVFormatContext *s, struct representation *pls)
1927 {
1928  int ret = 0;
1929  int i;
1930 
1931  pls->parent = s;
1932  pls->cur_seq_no = calc_cur_seg_no(s, pls);
1933 
1934  if (!pls->last_seq_no) {
1935  pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
1936  }
1937 
1938  ret = reopen_demux_for_component(s, pls);
1939  if (ret < 0) {
1940  goto fail;
1941  }
1942  for (i = 0; i < pls->ctx->nb_streams; i++) {
1943  AVStream *st = avformat_new_stream(s, NULL);
1944  AVStream *ist = pls->ctx->streams[i];
1945  if (!st) {
1946  ret = AVERROR(ENOMEM);
1947  goto fail;
1948  }
1949  st->id = i;
1950  avcodec_parameters_copy(st->codecpar, ist->codecpar);
1951  avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
1952  }
1953 
1954  return 0;
1955 fail:
1956  return ret;
1957 }
1958 
1959 static int is_common_init_section_exist(struct representation **pls, int n_pls)
1960 {
1961  struct fragment *first_init_section = pls[0]->init_section;
1962  char *url =NULL;
1963  int64_t url_offset = -1;
1964  int64_t size = -1;
1965  int i = 0;
1966 
1967  if (first_init_section == NULL || n_pls == 0)
1968  return 0;
1969 
1970  url = first_init_section->url;
1971  url_offset = first_init_section->url_offset;
1972  size = pls[0]->init_section->size;
1973  for (i=0;i<n_pls;i++) {
1974  if (av_strcasecmp(pls[i]->init_section->url,url) || pls[i]->init_section->url_offset != url_offset || pls[i]->init_section->size != size) {
1975  return 0;
1976  }
1977  }
1978  return 1;
1979 }
1980 
1981 static int copy_init_section(struct representation *rep_dest, struct representation *rep_src)
1982 {
1983  rep_dest->init_sec_buf = av_mallocz(rep_src->init_sec_buf_size);
1984  if (!rep_dest->init_sec_buf) {
1985  av_log(rep_dest->ctx, AV_LOG_WARNING, "Cannot alloc memory for init_sec_buf\n");
1986  return AVERROR(ENOMEM);
1987  }
1988  memcpy(rep_dest->init_sec_buf, rep_src->init_sec_buf, rep_src->init_sec_data_len);
1989  rep_dest->init_sec_buf_size = rep_src->init_sec_buf_size;
1990  rep_dest->init_sec_data_len = rep_src->init_sec_data_len;
1991  rep_dest->cur_timestamp = rep_src->cur_timestamp;
1992 
1993  return 0;
1994 }
1995 
1996 
1997 static int dash_read_header(AVFormatContext *s)
1998 {
1999  DASHContext *c = s->priv_data;
2000  struct representation *rep;
2001  int ret = 0;
2002  int stream_index = 0;
2003  int i;
2004 
2005  c->interrupt_callback = &s->interrupt_callback;
2006 
2007  if ((ret = save_avio_options(s)) < 0)
2008  goto fail;
2009 
2010  if ((ret = parse_manifest(s, s->url, s->pb)) < 0)
2011  goto fail;
2012 
2013  /* If this isn't a live stream, fill the total duration of the
2014  * stream. */
2015  if (!c->is_live) {
2016  s->duration = (int64_t) c->media_presentation_duration * AV_TIME_BASE;
2017  } else {
2018  av_dict_set(&c->avio_opts, "seekable", "0", 0);
2019  }
2020 
2021  if(c->n_videos)
2022  c->is_init_section_common_video = is_common_init_section_exist(c->videos, c->n_videos);
2023 
2024  /* Open the demuxer for video and audio components if available */
2025  for (i = 0; i < c->n_videos; i++) {
2026  rep = c->videos[i];
2027  if (i > 0 && c->is_init_section_common_video) {
2028  ret = copy_init_section(rep, c->videos[0]);
2029  if (ret < 0)
2030  goto fail;
2031  }
2032  ret = open_demux_for_component(s, rep);
2033 
2034  if (ret)
2035  goto fail;
2036  rep->stream_index = stream_index;
2037  ++stream_index;
2038  }
2039 
2040  if(c->n_audios)
2041  c->is_init_section_common_audio = is_common_init_section_exist(c->audios, c->n_audios);
2042 
2043  for (i = 0; i < c->n_audios; i++) {
2044  rep = c->audios[i];
2045  if (i > 0 && c->is_init_section_common_audio) {
2046  ret = copy_init_section(rep, c->audios[0]);
2047  if (ret < 0)
2048  goto fail;
2049  }
2050  ret = open_demux_for_component(s, rep);
2051 
2052  if (ret)
2053  goto fail;
2054  rep->stream_index = stream_index;
2055  ++stream_index;
2056  }
2057 
2058  if (c->n_subtitles)
2059  c->is_init_section_common_audio = is_common_init_section_exist(c->subtitles, c->n_subtitles);
2060 
2061  for (i = 0; i < c->n_subtitles; i++) {
2062  rep = c->subtitles[i];
2063  if (i > 0 && c->is_init_section_common_audio) {
2064  ret = copy_init_section(rep, c->subtitles[0]);
2065  if (ret < 0)
2066  goto fail;
2067  }
2068  ret = open_demux_for_component(s, rep);
2069 
2070  if (ret)
2071  goto fail;
2072  rep->stream_index = stream_index;
2073  ++stream_index;
2074  }
2075 
2076  if (!stream_index) {
2077  ret = AVERROR_INVALIDDATA;
2078  goto fail;
2079  }
2080 
2081  /* Create a program */
2082  if (!ret) {
2083  AVProgram *program;
2084  program = av_new_program(s, 0);
2085  if (!program) {
2086  goto fail;
2087  }
2088 
2089  for (i = 0; i < c->n_videos; i++) {
2090  rep = c->videos[i];
2091  av_program_add_stream_index(s, 0, rep->stream_index);
2092  rep->assoc_stream = s->streams[rep->stream_index];
2093  if (rep->bandwidth > 0)
2094  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2095  if (rep->id[0])
2096  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2097  }
2098  for (i = 0; i < c->n_audios; i++) {
2099  rep = c->audios[i];
2100  av_program_add_stream_index(s, 0, rep->stream_index);
2101  rep->assoc_stream = s->streams[rep->stream_index];
2102  if (rep->bandwidth > 0)
2103  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2104  if (rep->id[0])
2105  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2106  }
2107  for (i = 0; i < c->n_subtitles; i++) {
2108  rep = c->subtitles[i];
2109  av_program_add_stream_index(s, 0, rep->stream_index);
2110  rep->assoc_stream = s->streams[rep->stream_index];
2111  if (rep->id[0])
2112  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2113  }
2114  }
2115 
2116  return 0;
2117 fail:
2118  return ret;
2119 }
2120 
2122 {
2123  int i, j;
2124 
2125  for (i = 0; i < n; i++) {
2126  struct representation *pls = p[i];
2127  int needed = !pls->assoc_stream || pls->assoc_stream->discard < AVDISCARD_ALL;
2128 
2129  if (needed && !pls->ctx) {
2130  pls->cur_seg_offset = 0;
2131  pls->init_sec_buf_read_offset = 0;
2132  /* Catch up */
2133  for (j = 0; j < n; j++) {
2134  pls->cur_seq_no = FFMAX(pls->cur_seq_no, p[j]->cur_seq_no);
2135  }
2136  reopen_demux_for_component(s, pls);
2137  av_log(s, AV_LOG_INFO, "Now receiving stream_index %d\n", pls->stream_index);
2138  } else if (!needed && pls->ctx) {
2139  close_demux_for_component(pls);
2140  if (pls->input)
2141  ff_format_io_close(pls->parent, &pls->input);
2142  av_log(s, AV_LOG_INFO, "No longer receiving stream_index %d\n", pls->stream_index);
2143  }
2144  }
2145 }
2146 
2147 static int dash_read_packet(AVFormatContext *s, AVPacket *pkt)
2148 {
2149  DASHContext *c = s->priv_data;
2150  int ret = 0, i;
2151  int64_t mints = 0;
2152  struct representation *cur = NULL;
2153  struct representation *rep = NULL;
2154 
2155  recheck_discard_flags(s, c->videos, c->n_videos);
2156  recheck_discard_flags(s, c->audios, c->n_audios);
2157  recheck_discard_flags(s, c->subtitles, c->n_subtitles);
2158 
2159  for (i = 0; i < c->n_videos; i++) {
2160  rep = c->videos[i];
2161  if (!rep->ctx)
2162  continue;
2163  if (!cur || rep->cur_timestamp < mints) {
2164  cur = rep;
2165  mints = rep->cur_timestamp;
2166  }
2167  }
2168  for (i = 0; i < c->n_audios; i++) {
2169  rep = c->audios[i];
2170  if (!rep->ctx)
2171  continue;
2172  if (!cur || rep->cur_timestamp < mints) {
2173  cur = rep;
2174  mints = rep->cur_timestamp;
2175  }
2176  }
2177 
2178  for (i = 0; i < c->n_subtitles; i++) {
2179  rep = c->subtitles[i];
2180  if (!rep->ctx)
2181  continue;
2182  if (!cur || rep->cur_timestamp < mints) {
2183  cur = rep;
2184  mints = rep->cur_timestamp;
2185  }
2186  }
2187 
2188  if (!cur) {
2189  return AVERROR_INVALIDDATA;
2190  }
2191  while (!ff_check_interrupt(c->interrupt_callback) && !ret) {
2192  ret = av_read_frame(cur->ctx, pkt);
2193  if (ret >= 0) {
2194  /* If we got a packet, return it */
2195  cur->cur_timestamp = av_rescale(pkt->pts, (int64_t)cur->ctx->streams[0]->time_base.num * 90000, cur->ctx->streams[0]->time_base.den);
2196  pkt->stream_index = cur->stream_index;
2197  return 0;
2198  }
2199  if (cur->is_restart_needed) {
2200  cur->cur_seg_offset = 0;
2201  cur->init_sec_buf_read_offset = 0;
2202  if (cur->input)
2203  ff_format_io_close(cur->parent, &cur->input);
2204  ret = reopen_demux_for_component(s, cur);
2205  cur->is_restart_needed = 0;
2206  }
2207  }
2208  return AVERROR_EOF;
2209 }
2210 
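/*
 * Worked example (illustrative numbers) for the av_rescale() call in
 * dash_read_packet() above: cur_timestamp is kept on a 90 kHz clock, so with
 * a stream time_base of 1/1000 a packet with pts = 2500 (2.5 s) gives
 *
 *     av_rescale(2500, (int64_t)1 * 90000, 1000) = 225000
 *
 * which is 2.5 s expressed in 90 kHz ticks.
 */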
2211 static int dash_close(AVFormatContext *s)
2212 {
2213  DASHContext *c = s->priv_data;
2214  free_audio_list(c);
2215  free_video_list(c);
2216  av_dict_free(&c->avio_opts);
2217  av_freep(&c->base_url);
2218  return 0;
2219 }
2220 
2221 static int dash_seek(AVFormatContext *s, struct representation *pls, int64_t seek_pos_msec, int flags, int dry_run)
2222 {
2223  int ret = 0;
2224  int i = 0;
2225  int j = 0;
2226  int64_t duration = 0;
2227 
2228  av_log(pls->parent, AV_LOG_VERBOSE, "DASH seek pos[%"PRId64"ms], playlist %d%s\n",
2229  seek_pos_msec, pls->rep_idx, dry_run ? " (dry)" : "");
2230 
2231  // single fragment mode
2232  if (pls->n_fragments == 1) {
2233  pls->cur_timestamp = 0;
2234  pls->cur_seg_offset = 0;
2235  if (dry_run)
2236  return 0;
2237  ff_read_frame_flush(pls->ctx);
2238  return av_seek_frame(pls->ctx, -1, seek_pos_msec * 1000, flags);
2239  }
2240 
2241  if (pls->input)
2242  ff_format_io_close(pls->parent, &pls->input);
2243 
2244  // find the nearest fragment
2245  if (pls->n_timelines > 0 && pls->fragment_timescale > 0) {
2246  int64_t num = pls->first_seq_no;
2247  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline start n_timelines[%d] "
2248  "last_seq_no[%"PRId64"], playlist %d.\n",
2249  (int)pls->n_timelines, (int64_t)pls->last_seq_no, (int)pls->rep_idx);
2250  for (i = 0; i < pls->n_timelines; i++) {
2251  if (pls->timelines[i]->starttime > 0) {
2252  duration = pls->timelines[i]->starttime;
2253  }
2254  duration += pls->timelines[i]->duration;
2255  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2256  goto set_seq_num;
2257  }
2258  for (j = 0; j < pls->timelines[i]->repeat; j++) {
2259  duration += pls->timelines[i]->duration;
2260  num++;
2261  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2262  goto set_seq_num;
2263  }
2264  }
2265  num++;
2266  }
2267 
2268 set_seq_num:
2269  pls->cur_seq_no = num > pls->last_seq_no ? pls->last_seq_no : num;
2270  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline end cur_seq_no[%"PRId64"], playlist %d.\n",
2271  (int64_t)pls->cur_seq_no, (int)pls->rep_idx);
2272  } else if (pls->fragment_duration > 0) {
2273  pls->cur_seq_no = pls->first_seq_no + ((seek_pos_msec * pls->fragment_timescale) / pls->fragment_duration) / 1000;
2274  } else {
2275  av_log(pls->parent, AV_LOG_ERROR, "dash_seek missing timeline or fragment_duration\n");
2276  pls->cur_seq_no = pls->first_seq_no;
2277  }
2278  pls->cur_timestamp = 0;
2279  pls->cur_seg_offset = 0;
2280  pls->init_sec_buf_read_offset = 0;
2281  ret = dry_run ? 0 : reopen_demux_for_component(s, pls);
2282 
2283  return ret;
2284 }
2285 
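/*
 * Worked example (illustrative numbers) for the fragment_duration branch of
 * dash_seek() above: with fragment_timescale = 90000, fragment_duration =
 * 360000 (4-second fragments) and first_seq_no = 1, a seek to
 * seek_pos_msec = 10000 yields
 *
 *     cur_seq_no = 1 + ((10000 * 90000) / 360000) / 1000
 *                = 1 + 2500 / 1000
 *                = 3
 *
 * i.e. the fragment covering 8..12 s, which contains the 10 s target.
 */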
2286 static int dash_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2287 {
2288  int ret = 0, i;
2289  DASHContext *c = s->priv_data;
2290  int64_t seek_pos_msec = av_rescale_rnd(timestamp, 1000,
2291  s->streams[stream_index]->time_base.den,
2292  flags & AVSEEK_FLAG_BACKWARD ?
2293  AV_ROUND_DOWN : AV_ROUND_UP);
2294  if ((flags & AVSEEK_FLAG_BYTE) || c->is_live)
2295  return AVERROR(ENOSYS);
2296 
2297  /* Seek in discarded streams with dry_run=1 to avoid reopening them */
2298  for (i = 0; i < c->n_videos; i++) {
2299  if (!ret)
2300  ret = dash_seek(s, c->videos[i], seek_pos_msec, flags, !c->videos[i]->ctx);
2301  }
2302  for (i = 0; i < c->n_audios; i++) {
2303  if (!ret)
2304  ret = dash_seek(s, c->audios[i], seek_pos_msec, flags, !c->audios[i]->ctx);
2305  }
2306  for (i = 0; i < c->n_subtitles; i++) {
2307  if (!ret)
2308  ret = dash_seek(s, c->subtitles[i], seek_pos_msec, flags, !c->subtitles[i]->ctx);
2309  }
2310 
2311  return ret;
2312 }
2313 
2314 static int dash_probe(const AVProbeData *p)
2315 {
2316  if (!av_stristr(p->buf, "<MPD"))
2317  return 0;
2318 
2319  if (av_stristr(p->buf, "dash:profile:isoff-on-demand:2011") ||
2320  av_stristr(p->buf, "dash:profile:isoff-live:2011") ||
2321  av_stristr(p->buf, "dash:profile:isoff-live:2012") ||
2322  av_stristr(p->buf, "dash:profile:isoff-main:2011")) {
2323  return AVPROBE_SCORE_MAX;
2324  }
2325  if (av_stristr(p->buf, "dash:profile")) {
2326  return AVPROBE_SCORE_MAX;
2327  }
2328 
2329  return 0;
2330 }
2331 
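/*
 * Illustrative sketch (not part of dashdec.c): dash_probe() above scores a
 * buffer that begins like the following at AVPROBE_SCORE_MAX, because it
 * contains both "<MPD" and a "dash:profile" substring:
 *
 *     <?xml version="1.0" encoding="UTF-8"?>
 *     <MPD xmlns="urn:mpeg:dash:schema:mpd:2011"
 *          type="static"
 *          profiles="urn:mpeg:dash:profile:isoff-on-demand:2011">
 *
 * A buffer without "<MPD" anywhere in the probe window scores 0.
 */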
2332 #define OFFSET(x) offsetof(DASHContext, x)
2333 #define FLAGS AV_OPT_FLAG_DECODING_PARAM
2334 static const AVOption dash_options[] = {
2335  {"allowed_extensions", "List of file extensions that dash is allowed to access",
2336  OFFSET(allowed_extensions), AV_OPT_TYPE_STRING,
2337  {.str = "aac,m4a,m4s,m4v,mov,mp4,webm"},
2338  INT_MIN, INT_MAX, FLAGS},
2339  {NULL}
2340 };
2341 
2342 static const AVClass dash_class = {
2343  .class_name = "dash",
2344  .item_name = av_default_item_name,
2345  .option = dash_options,
2346  .version = LIBAVUTIL_VERSION_INT,
2347 };
2348 
2349 AVInputFormat ff_dash_demuxer = {
2350  .name = "dash",
2351  .long_name = NULL_IF_CONFIG_SMALL("Dynamic Adaptive Streaming over HTTP"),
2352  .priv_class = &dash_class,
2353  .priv_data_size = sizeof(DASHContext),
2354  .read_probe = dash_probe,
2355  .read_header = dash_read_header,
2356  .read_packet = dash_read_packet,
2357  .read_close = dash_close,
2358  .read_seek = dash_read_seek,
2359  .flags = AVFMT_NO_BYTE_SEEK,
2360 };
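/*
 * Illustrative sketch (not part of dashdec.c): once registered, the demuxer
 * is driven through the ordinary libavformat API; the manifest URL below is a
 * placeholder:
 *
 *     AVFormatContext *fmt = NULL;
 *     AVPacket pkt;
 *
 *     if (avformat_open_input(&fmt, "http://example.com/stream.mpd",
 *                             NULL, NULL) < 0)
 *         return -1;
 *     if (avformat_find_stream_info(fmt, NULL) < 0)
 *         return -1;
 *     while (av_read_frame(fmt, &pkt) >= 0) {
 *         // pkt.stream_index matches the representation's assoc_stream
 *         av_packet_unref(&pkt);
 *     }
 *     avformat_close_input(&fmt);
 */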