dashdec.c
1 /*
2  * Dynamic Adaptive Streaming over HTTP demux
3  * Copyright (c) 2017 samsamsam@o2.pl based on HLS demux
4  * Copyright (c) 2017 Steven Liu
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 #include <libxml/parser.h>
23 #include <time.h>
24 #include "libavutil/bprint.h"
25 #include "libavutil/opt.h"
26 #include "libavutil/time.h"
27 #include "libavutil/parseutils.h"
28 #include "internal.h"
29 #include "avio_internal.h"
30 #include "dash.h"
31 #include "demux.h"
32 #include "url.h"
33 
34 #define INITIAL_BUFFER_SIZE 32768
35 
36 struct fragment {
37  int64_t url_offset;
38  int64_t size;
39  char *url;
40 };
41 
42 /*
43  * reference to : ISO_IEC_23009-1-DASH-2012
44  * Section: 5.3.9.6.2
45  * Table: Table 17 — Semantics of SegmentTimeline element
46  * */
47 struct timeline {
48  /* starttime: Element or Attribute Name
49  * specifies the MPD start time, in @timescale units, at which
50  * the first Segment in the series starts, relative to the beginning of the Period.
51  * The value of this attribute must be equal to or greater than the sum of the previous S
52  * element earliest presentation time and the sum of the contiguous Segment durations.
53  * If the value of the attribute is greater than what is expressed by the previous S element,
54  * it expresses discontinuities in the timeline.
55  * If not present then the value shall be assumed to be zero for the first S element
56  * and for the subsequent S elements, the value shall be assumed to be the sum of
57  * the previous S element's earliest presentation time and contiguous duration
58  * (i.e. previous S@starttime + @duration * (@repeat + 1)).
59  * */
60  int64_t starttime;
61  /* repeat: Element or Attribute Name
62  * specifies the repeat count of the number of following contiguous Segments with
63  * the same duration expressed by the value of @duration. This value is zero-based
64  * (e.g. a value of three means four Segments in the contiguous series).
65  * */
66  int64_t repeat;
67  /* duration: Element or Attribute Name
68  * specifies the Segment duration, in units of the value of the @timescale.
69  * */
70  int64_t duration;
71 };
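/*
 * Illustrative example: a manifest SegmentTimeline such as
 *     <SegmentTimeline>
 *       <S t="0" d="5000" r="2"/>   -> 3 segments starting at 0, 5000, 10000 (@timescale units)
 *       <S d="4000"/>               -> 1 more segment starting at 15000
 *     </SegmentTimeline>
 * is stored by parse_manifest_segmenttimeline() as two struct timeline entries,
 * {starttime=0, repeat=2, duration=5000} and {starttime=0, repeat=0, duration=4000};
 * get_segment_start_time_based_on_timeline() later expands them back into
 * per-segment start times.
 */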
72 
73 /*
74  * Each playlist has its own demuxer. If it is currently active,
75  * it has an opened AVIOContext too, and potentially an AVPacket
76  * containing the next packet from this stream.
77  */
78 struct representation {
79  char *url_template;
80  AVIOContext pb;
81  AVIOContext *input;
82  AVFormatContext *parent;
83  AVFormatContext *ctx;
84  int stream_index;
85 
86  char *id;
87  char *lang;
88  int bandwidth;
89  AVRational framerate;
90  AVStream *assoc_stream; /* demuxer stream associated with this representation */
91 
92  int n_fragments;
93  struct fragment **fragments; /* VOD list of fragment for profile */
94 
95  int n_timelines;
96  struct timeline **timelines;
97 
98  int64_t first_seq_no;
99  int64_t last_seq_no;
100  int64_t start_number; /* with a dynamic segment list, used to tell which segments are new */
101 
102  int64_t fragment_duration;
103  int64_t fragment_timescale;
104 
105  int64_t presentation_timeoffset;
106 
107  int64_t cur_seq_no;
108  int64_t cur_seg_offset;
109  int64_t cur_seg_size;
110  struct fragment *cur_seg;
111 
112  /* Currently active Media Initialization Section */
113  struct fragment *init_section;
114  uint8_t *init_sec_buf;
115  uint32_t init_sec_buf_size;
116  uint32_t init_sec_data_len;
117  uint32_t init_sec_buf_read_offset;
118  int64_t cur_timestamp;
119  int is_restart_needed;
120 };
121 
122 typedef struct DASHContext {
123  const AVClass *class;
124  char *base_url;
125 
126  int n_videos;
127  struct representation **videos;
128  int n_audios;
129  struct representation **audios;
130  int n_subtitles;
131  struct representation **subtitles;
132 
133  /* MediaPresentationDescription Attribute */
134  uint64_t media_presentation_duration;
135  uint64_t suggested_presentation_delay;
136  uint64_t availability_start_time;
137  uint64_t availability_end_time;
138  uint64_t publish_time;
139  uint64_t minimum_update_period;
140  uint64_t time_shift_buffer_depth;
141  uint64_t min_buffer_time;
142 
143  /* Period Attribute */
144  uint64_t period_duration;
145  uint64_t period_start;
146 
147  /* AdaptationSet Attribute */
148  char *adaptionset_lang;
149 
150  int is_live;
151  AVIOInterruptCB *interrupt_callback;
152  char *allowed_extensions;
153  AVDictionary *avio_opts;
154  int max_url_size;
155  char *cenc_decryption_key;
156 
157  /* Flags for init section*/
158  int is_init_section_common_video;
159  int is_init_section_common_audio;
160  int is_init_section_common_subtitle;
161 
162 } DASHContext;
163 
164 static int ishttp(char *url)
165 {
166  const char *proto_name = avio_find_protocol_name(url);
167  return proto_name && av_strstart(proto_name, "http", NULL);
168 }
169 
170 static int aligned(int val)
171 {
172  return ((val + 0x3F) >> 6) << 6;
173 }
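/*
 * aligned() rounds val up to the next multiple of 64 (e.g. aligned(100) == 128,
 * aligned(128) == 128); it is used below to over-allocate URL buffers such as
 * max_url_size so that repeated template substitutions do not need exact bookkeeping.
 */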
174 
175 static uint64_t get_current_time_in_sec(void)
176 {
177  return av_gettime() / 1000000;
178 }
179 
180 static uint64_t get_utc_date_time_insec(AVFormatContext *s, const char *datetime)
181 {
182  struct tm timeinfo;
183  int year = 0;
184  int month = 0;
185  int day = 0;
186  int hour = 0;
187  int minute = 0;
188  int ret = 0;
189  float second = 0.0;
190 
191  /* ISO-8601 date parser */
192  if (!datetime)
193  return 0;
194 
195  ret = sscanf(datetime, "%d-%d-%dT%d:%d:%fZ", &year, &month, &day, &hour, &minute, &second);
196  /* expect 6 fields: year, month, day, hour, minute, second */
197  if (ret != 6) {
198  av_log(s, AV_LOG_WARNING, "get_utc_date_time_insec get a wrong time format\n");
199  }
200  timeinfo.tm_year = year - 1900;
201  timeinfo.tm_mon = month - 1;
202  timeinfo.tm_mday = day;
203  timeinfo.tm_hour = hour;
204  timeinfo.tm_min = minute;
205  timeinfo.tm_sec = (int)second;
206 
207  return av_timegm(&timeinfo);
208 }
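/*
 * Illustrative example: "2023-01-01T00:00:30Z" is parsed into the six fields above
 * and converted by av_timegm() to 1672531230 (seconds since the Unix epoch);
 * fractional seconds are truncated by the (int) cast.
 */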
209 
210 static uint32_t get_duration_insec(AVFormatContext *s, const char *duration)
211 {
212  /* ISO-8601 duration parser */
213  uint32_t days = 0;
214  uint32_t hours = 0;
215  uint32_t mins = 0;
216  uint32_t secs = 0;
217  int size = 0;
218  float value = 0;
219  char type = '\0';
220  const char *ptr = duration;
221 
222  while (*ptr) {
223  if (*ptr == 'P' || *ptr == 'T') {
224  ptr++;
225  continue;
226  }
227 
228  if (sscanf(ptr, "%f%c%n", &value, &type, &size) != 2) {
229  av_log(s, AV_LOG_WARNING, "get_duration_insec get a wrong time format\n");
230  return 0; /* parser error */
231  }
232  switch (type) {
233  case 'D':
234  days = (uint32_t)value;
235  break;
236  case 'H':
237  hours = (uint32_t)value;
238  break;
239  case 'M':
240  mins = (uint32_t)value;
241  break;
242  case 'S':
243  secs = (uint32_t)value;
244  break;
245  default:
246  // handle invalid type
247  break;
248  }
249  ptr += size;
250  }
251  return ((days * 24 + hours) * 60 + mins) * 60 + secs;
252 }
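/*
 * Illustrative examples: "PT10M30S" -> 630 seconds, "P1DT2H" -> 93600 seconds.
 * Note that this simplified ISO-8601 parser always treats the 'M' designator as
 * minutes, so a month designator in a duration would be misread as minutes.
 */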
253 
254 static int64_t get_segment_start_time_based_on_timeline(struct representation *pls, int64_t cur_seq_no)
255 {
256  int64_t start_time = 0;
257  int64_t i = 0;
258  int64_t j = 0;
259  int64_t num = 0;
260 
261  if (pls->n_timelines) {
262  for (i = 0; i < pls->n_timelines; i++) {
263  if (pls->timelines[i]->starttime > 0) {
264  start_time = pls->timelines[i]->starttime;
265  }
266  if (num == cur_seq_no)
267  goto finish;
268 
269  start_time += pls->timelines[i]->duration;
270 
271  if (pls->timelines[i]->repeat == -1) {
272  start_time = pls->timelines[i]->duration * cur_seq_no;
273  goto finish;
274  }
275 
276  for (j = 0; j < pls->timelines[i]->repeat; j++) {
277  num++;
278  if (num == cur_seq_no)
279  goto finish;
280  start_time += pls->timelines[i]->duration;
281  }
282  num++;
283  }
284  }
285 finish:
286  return start_time;
287 }
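/*
 * Worked example (illustrative), using the SegmentTimeline sketched above the
 * struct timeline definition ({t=0, d=5000, r=2} followed by {d=4000}):
 * cur_seq_no 0, 1, 2, 3 map to start times 0, 5000, 10000 and 15000. The returned
 * value is in @timescale units relative to the Period, not in seconds.
 */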
288 
289 static int64_t calc_next_seg_no_from_timelines(struct representation *pls, int64_t cur_time)
290 {
291  int64_t i = 0;
292  int64_t j = 0;
293  int64_t num = 0;
294  int64_t start_time = 0;
295 
296  for (i = 0; i < pls->n_timelines; i++) {
297  if (pls->timelines[i]->starttime > 0) {
298  start_time = pls->timelines[i]->starttime;
299  }
300  if (start_time > cur_time)
301  goto finish;
302 
303  start_time += pls->timelines[i]->duration;
304  for (j = 0; j < pls->timelines[i]->repeat; j++) {
305  num++;
306  if (start_time > cur_time)
307  goto finish;
308  start_time += pls->timelines[i]->duration;
309  }
310  num++;
311  }
312 
313  return -1;
314 
315 finish:
316  return num;
317 }
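/*
 * Roughly the inverse of the function above: walk the timeline and return the index
 * (counted from the first timeline entry) of the first segment whose start time is
 * strictly greater than cur_time, or -1 if no such segment exists. Callers that want
 * the segment containing a given time pass cur_time - 1 (see refresh_manifest()).
 */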
318 
319 static void free_fragment(struct fragment **seg)
320 {
321  if (!(*seg)) {
322  return;
323  }
324  av_freep(&(*seg)->url);
325  av_freep(seg);
326 }
327 
328 static void free_fragment_list(struct representation *pls)
329 {
330  int i;
331 
332  for (i = 0; i < pls->n_fragments; i++) {
333  free_fragment(&pls->fragments[i]);
334  }
335  av_freep(&pls->fragments);
336  pls->n_fragments = 0;
337 }
338 
339 static void free_timelines_list(struct representation *pls)
340 {
341  int i;
342 
343  for (i = 0; i < pls->n_timelines; i++) {
344  av_freep(&pls->timelines[i]);
345  }
346  av_freep(&pls->timelines);
347  pls->n_timelines = 0;
348 }
349 
350 static void free_representation(struct representation *pls)
351 {
352  free_fragment_list(pls);
353  free_timelines_list(pls);
354  free_fragment(&pls->cur_seg);
355  free_fragment(&pls->init_section);
356  av_freep(&pls->init_sec_buf);
357  av_freep(&pls->pb.pub.buffer);
358  ff_format_io_close(pls->parent, &pls->input);
359  if (pls->ctx) {
360  pls->ctx->pb = NULL;
361  avformat_close_input(&pls->ctx);
362  }
363 
364  av_freep(&pls->url_template);
365  av_freep(&pls->lang);
366  av_freep(&pls->id);
367  av_freep(&pls);
368 }
369 
370 static void free_video_list(DASHContext *c)
371 {
372  int i;
373  for (i = 0; i < c->n_videos; i++) {
374  struct representation *pls = c->videos[i];
375  free_representation(pls);
376  }
377  av_freep(&c->videos);
378  c->n_videos = 0;
379 }
380 
381 static void free_audio_list(DASHContext *c)
382 {
383  int i;
384  for (i = 0; i < c->n_audios; i++) {
385  struct representation *pls = c->audios[i];
386  free_representation(pls);
387  }
388  av_freep(&c->audios);
389  c->n_audios = 0;
390 }
391 
392 static void free_subtitle_list(DASHContext *c)
393 {
394  int i;
395  for (i = 0; i < c->n_subtitles; i++) {
396  struct representation *pls = c->subtitles[i];
397  free_representation(pls);
398  }
399  av_freep(&c->subtitles);
400  c->n_subtitles = 0;
401 }
402 
403 static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
404  AVDictionary **opts, AVDictionary *opts2, int *is_http)
405 {
406  DASHContext *c = s->priv_data;
407  AVDictionary *tmp = NULL;
408  const char *proto_name = NULL;
409  int proto_name_len;
410  int ret;
411 
412  if (av_strstart(url, "crypto", NULL)) {
413  if (url[6] == '+' || url[6] == ':')
414  proto_name = avio_find_protocol_name(url + 7);
415  }
416 
417  if (!proto_name)
418  proto_name = avio_find_protocol_name(url);
419 
420  if (!proto_name)
421  return AVERROR_INVALIDDATA;
422 
423  proto_name_len = strlen(proto_name);
424  // only http(s) & file are allowed
425  if (av_strstart(proto_name, "file", NULL)) {
426  if (strcmp(c->allowed_extensions, "ALL") && !av_match_ext(url, c->allowed_extensions)) {
428  "Filename extension of \'%s\' is not a common multimedia extension, blocked for security reasons.\n"
429  "If you wish to override this adjust allowed_extensions, you can set it to \'ALL\' to allow all\n",
430  url);
431  return AVERROR_INVALIDDATA;
432  }
433  } else if (av_strstart(proto_name, "http", NULL)) {
434  ;
435  } else
436  return AVERROR_INVALIDDATA;
437 
438  if (!strncmp(proto_name, url, proto_name_len) && url[proto_name_len] == ':')
439  ;
440  else if (av_strstart(url, "crypto", NULL) && !strncmp(proto_name, url + 7, proto_name_len) && url[7 + proto_name_len] == ':')
441  ;
442  else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
443  return AVERROR_INVALIDDATA;
444 
445  av_freep(pb);
446  av_dict_copy(&tmp, *opts, 0);
447  av_dict_copy(&tmp, opts2, 0);
448  ret = avio_open2(pb, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);
449  if (ret >= 0) {
450  // update cookies on http response with setcookies.
451  char *new_cookies = NULL;
452 
453  if (!(s->flags & AVFMT_FLAG_CUSTOM_IO))
454  av_opt_get(*pb, "cookies", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&new_cookies);
455 
456  if (new_cookies) {
457  av_dict_set(opts, "cookies", new_cookies, AV_DICT_DONT_STRDUP_VAL);
458  }
459 
460  }
461 
462  av_dict_free(&tmp);
463 
464  if (is_http)
465  *is_http = av_strstart(proto_name, "http", NULL);
466 
467  return ret;
468 }
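/*
 * In short: open_url() accepts only http(s), file and crypto-prefixed URLs, applies
 * the allowed_extensions check to plain files, merges the cached AVIO options with
 * the per-request ones (e.g. byte-range offsets), and copies any updated HTTP cookies
 * back into *opts so that subsequent requests reuse them.
 */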
469 
470 static char *get_content_url(xmlNodePtr *baseurl_nodes,
471  int n_baseurl_nodes,
472  int max_url_size,
473  char *rep_id_val,
474  char *rep_bandwidth_val,
475  char *val)
476 {
477  int i;
478  char *text;
479  char *url = NULL;
480  char *tmp_str = av_mallocz(max_url_size);
481 
482  if (!tmp_str)
483  return NULL;
484 
485  for (i = 0; i < n_baseurl_nodes; ++i) {
486  if (baseurl_nodes[i] &&
487  baseurl_nodes[i]->children &&
488  baseurl_nodes[i]->children->type == XML_TEXT_NODE) {
489  text = xmlNodeGetContent(baseurl_nodes[i]->children);
490  if (text) {
491  memset(tmp_str, 0, max_url_size);
492  ff_make_absolute_url(tmp_str, max_url_size, "", text);
493  xmlFree(text);
494  }
495  }
496  }
497 
498  if (val)
499  ff_make_absolute_url(tmp_str, max_url_size, tmp_str, val);
500 
501  if (rep_id_val) {
502  url = av_strireplace(tmp_str, "$RepresentationID$", rep_id_val);
503  if (!url) {
504  goto end;
505  }
506  av_strlcpy(tmp_str, url, max_url_size);
507  }
508  if (rep_bandwidth_val && tmp_str[0] != '\0') {
509  // free any previously assigned url before reassigning
510  av_free(url);
511  url = av_strireplace(tmp_str, "$Bandwidth$", rep_bandwidth_val);
512  if (!url) {
513  goto end;
514  }
515  }
516 end:
517  av_free(tmp_str);
518  return url;
519 }
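/*
 * Illustrative example (hypothetical names): with a BaseURL resolving to
 * "http://example.com/dash/", val = "$RepresentationID$/seg-$Bandwidth$.mp4",
 * rep_id_val = "video1" and rep_bandwidth_val = "800000", the returned URL is
 * "http://example.com/dash/video1/seg-800000.mp4".
 */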
520 
521 static char *get_val_from_nodes_tab(xmlNodePtr *nodes, const int n_nodes, const char *attrname)
522 {
523  int i;
524  char *val;
525 
526  for (i = 0; i < n_nodes; ++i) {
527  if (nodes[i]) {
528  val = xmlGetProp(nodes[i], attrname);
529  if (val)
530  return val;
531  }
532  }
533 
534  return NULL;
535 }
536 
537 static xmlNodePtr find_child_node_by_name(xmlNodePtr rootnode, const char *nodename)
538 {
539  xmlNodePtr node = rootnode;
540  if (!node) {
541  return NULL;
542  }
543 
544  node = xmlFirstElementChild(node);
545  while (node) {
546  if (!av_strcasecmp(node->name, nodename)) {
547  return node;
548  }
549  node = xmlNextElementSibling(node);
550  }
551  return NULL;
552 }
553 
554 static enum AVMediaType get_content_type(xmlNodePtr node)
555 {
556  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
557  int i = 0;
558  const char *attr;
559  char *val = NULL;
560 
561  if (node) {
562  for (i = 0; i < 2; i++) {
563  attr = i ? "mimeType" : "contentType";
564  val = xmlGetProp(node, attr);
565  if (val) {
566  if (av_stristr(val, "video")) {
568  } else if (av_stristr(val, "audio")) {
570  } else if (av_stristr(val, "text")) {
572  }
573  xmlFree(val);
574  }
575  }
576  }
577  return type;
578 }
579 
580 static struct fragment * get_Fragment(char *range)
581 {
582  struct fragment * seg = av_mallocz(sizeof(struct fragment));
583 
584  if (!seg)
585  return NULL;
586 
587  seg->size = -1;
588  if (range) {
589  char *str_end_offset;
590  char *str_offset = av_strtok(range, "-", &str_end_offset);
591  seg->url_offset = strtoll(str_offset, NULL, 10);
592  seg->size = strtoll(str_end_offset, NULL, 10) - seg->url_offset + 1;
593  }
594 
595  return seg;
596 }
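/*
 * Illustrative example: a range attribute of "100-499" yields url_offset = 100 and
 * size = 400 (the byte range is inclusive at both ends); without a range the
 * fragment keeps size = -1, meaning "the whole resource".
 */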
597 
598 static int parse_manifest_segmenturlnode(AVFormatContext *s, struct representation *rep,
599  xmlNodePtr fragmenturl_node,
600  xmlNodePtr *baseurl_nodes,
601  char *rep_id_val,
602  char *rep_bandwidth_val)
603 {
604  DASHContext *c = s->priv_data;
605  char *initialization_val = NULL;
606  char *media_val = NULL;
607  char *range_val = NULL;
608  int max_url_size = c ? c->max_url_size: MAX_URL_SIZE;
609  int err;
610 
611  if (!av_strcasecmp(fragmenturl_node->name, "Initialization")) {
612  initialization_val = xmlGetProp(fragmenturl_node, "sourceURL");
613  range_val = xmlGetProp(fragmenturl_node, "range");
614  if (initialization_val || range_val) {
615  free_fragment(&rep->init_section);
616  rep->init_section = get_Fragment(range_val);
617  xmlFree(range_val);
618  if (!rep->init_section) {
619  xmlFree(initialization_val);
620  return AVERROR(ENOMEM);
621  }
622  rep->init_section->url = get_content_url(baseurl_nodes, 4,
623  max_url_size,
624  rep_id_val,
625  rep_bandwidth_val,
626  initialization_val);
627  xmlFree(initialization_val);
628  if (!rep->init_section->url) {
629  av_freep(&rep->init_section);
630  return AVERROR(ENOMEM);
631  }
632  }
633  } else if (!av_strcasecmp(fragmenturl_node->name, "SegmentURL")) {
634  media_val = xmlGetProp(fragmenturl_node, "media");
635  range_val = xmlGetProp(fragmenturl_node, "mediaRange");
636  if (media_val || range_val) {
637  struct fragment *seg = get_Fragment(range_val);
638  xmlFree(range_val);
639  if (!seg) {
640  xmlFree(media_val);
641  return AVERROR(ENOMEM);
642  }
643  seg->url = get_content_url(baseurl_nodes, 4,
644  max_url_size,
645  rep_id_val,
646  rep_bandwidth_val,
647  media_val);
648  xmlFree(media_val);
649  if (!seg->url) {
650  av_free(seg);
651  return AVERROR(ENOMEM);
652  }
653  err = av_dynarray_add_nofree(&rep->fragments, &rep->n_fragments, seg);
654  if (err < 0) {
655  free_fragment(&seg);
656  return err;
657  }
658  }
659  }
660 
661  return 0;
662 }
663 
664 static int parse_manifest_segmenttimeline(AVFormatContext *s, struct representation *rep,
665  xmlNodePtr fragment_timeline_node)
666 {
667  xmlAttrPtr attr = NULL;
668  char *val = NULL;
669  int err;
670 
671  if (!av_strcasecmp(fragment_timeline_node->name, "S")) {
672  struct timeline *tml = av_mallocz(sizeof(struct timeline));
673  if (!tml) {
674  return AVERROR(ENOMEM);
675  }
676  attr = fragment_timeline_node->properties;
677  while (attr) {
678  val = xmlGetProp(fragment_timeline_node, attr->name);
679 
680  if (!val) {
681  av_log(s, AV_LOG_WARNING, "parse_manifest_segmenttimeline attr->name = %s val is NULL\n", attr->name);
682  continue;
683  }
684 
685  if (!av_strcasecmp(attr->name, "t")) {
686  tml->starttime = (int64_t)strtoll(val, NULL, 10);
687  } else if (!av_strcasecmp(attr->name, "r")) {
688  tml->repeat =(int64_t) strtoll(val, NULL, 10);
689  } else if (!av_strcasecmp(attr->name, "d")) {
690  tml->duration = (int64_t)strtoll(val, NULL, 10);
691  }
692  attr = attr->next;
693  xmlFree(val);
694  }
695  err = av_dynarray_add_nofree(&rep->timelines, &rep->n_timelines, tml);
696  if (err < 0) {
697  av_free(tml);
698  return err;
699  }
700  }
701 
702  return 0;
703 }
704 
705 static int resolve_content_path(AVFormatContext *s, const char *url, int *max_url_size, xmlNodePtr *baseurl_nodes, int n_baseurl_nodes)
706 {
707  char *tmp_str = NULL;
708  char *path = NULL;
709  char *mpdName = NULL;
710  xmlNodePtr node = NULL;
711  char *baseurl = NULL;
712  char *root_url = NULL;
713  char *text = NULL;
714  char *tmp = NULL;
715  int isRootHttp = 0;
716  char token ='/';
717  int start = 0;
718  int rootId = 0;
719  int updated = 0;
720  int size = 0;
721  int i;
722  int tmp_max_url_size = strlen(url);
723 
724  for (i = n_baseurl_nodes-1; i >= 0 ; i--) {
725  text = xmlNodeGetContent(baseurl_nodes[i]);
726  if (!text)
727  continue;
728  tmp_max_url_size += strlen(text);
729  if (ishttp(text)) {
730  xmlFree(text);
731  break;
732  }
733  xmlFree(text);
734  }
735 
736  tmp_max_url_size = aligned(tmp_max_url_size);
737  text = av_mallocz(tmp_max_url_size);
738  if (!text) {
739  updated = AVERROR(ENOMEM);
740  goto end;
741  }
742  av_strlcpy(text, url, strlen(url)+1);
743  tmp = text;
744  while (mpdName = av_strtok(tmp, "/", &tmp)) {
745  size = strlen(mpdName);
746  }
747  av_free(text);
748 
749  path = av_mallocz(tmp_max_url_size);
750  tmp_str = av_mallocz(tmp_max_url_size);
751  if (!tmp_str || !path) {
752  updated = AVERROR(ENOMEM);
753  goto end;
754  }
755 
756  av_strlcpy (path, url, strlen(url) - size + 1);
757  for (rootId = n_baseurl_nodes - 1; rootId > 0; rootId --) {
758  if (!(node = baseurl_nodes[rootId])) {
759  continue;
760  }
761  text = xmlNodeGetContent(node);
762  if (ishttp(text)) {
763  xmlFree(text);
764  break;
765  }
766  xmlFree(text);
767  }
768 
769  node = baseurl_nodes[rootId];
770  baseurl = xmlNodeGetContent(node);
771  root_url = (av_strcasecmp(baseurl, "")) ? baseurl : path;
772  if (node) {
773  xmlNodeSetContent(node, root_url);
774  updated = 1;
775  }
776 
777  size = strlen(root_url);
778  isRootHttp = ishttp(root_url);
779 
780  if (size > 0 && root_url[size - 1] != token) {
781  av_strlcat(root_url, "/", size + 2);
782  size += 2;
783  }
784 
785  for (i = 0; i < n_baseurl_nodes; ++i) {
786  if (i == rootId) {
787  continue;
788  }
789  text = xmlNodeGetContent(baseurl_nodes[i]);
790  if (text && !av_strstart(text, "/", NULL)) {
791  memset(tmp_str, 0, strlen(tmp_str));
792  if (!ishttp(text) && isRootHttp) {
793  av_strlcpy(tmp_str, root_url, size + 1);
794  }
795  start = (text[0] == token);
796  if (start && av_stristr(tmp_str, text)) {
797  char *p = tmp_str;
798  if (!av_strncasecmp(tmp_str, "http://", 7)) {
799  p += 7;
800  } else if (!av_strncasecmp(tmp_str, "https://", 8)) {
801  p += 8;
802  }
803  p = strchr(p, '/');
804  memset(p + 1, 0, strlen(p));
805  }
806  av_strlcat(tmp_str, text + start, tmp_max_url_size);
807  xmlNodeSetContent(baseurl_nodes[i], tmp_str);
808  updated = 1;
809  xmlFree(text);
810  }
811  }
812 
813 end:
814  if (tmp_max_url_size > *max_url_size) {
815  *max_url_size = tmp_max_url_size;
816  }
817  av_free(path);
818  av_free(tmp_str);
819  xmlFree(baseurl);
820  return updated;
821 
822 }
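/*
 * In short: resolve_content_path() rewrites the BaseURL nodes in place so that every
 * level (MPD, Period, AdaptationSet, Representation) ends up holding an absolute URL.
 * The root is the deepest BaseURL that is already an http(s) URL, falling back to the
 * directory of the manifest URL; relative entries are concatenated onto it. Returns
 * non-zero when a node was updated, 0 when nothing changed, or AVERROR(ENOMEM).
 */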
823 
824 static int parse_manifest_representation(AVFormatContext *s, const char *url,
825  xmlNodePtr node,
826  xmlNodePtr adaptionset_node,
827  xmlNodePtr mpd_baseurl_node,
828  xmlNodePtr period_baseurl_node,
829  xmlNodePtr period_segmenttemplate_node,
830  xmlNodePtr period_segmentlist_node,
831  xmlNodePtr fragment_template_node,
832  xmlNodePtr content_component_node,
833  xmlNodePtr adaptionset_baseurl_node,
834  xmlNodePtr adaptionset_segmentlist_node,
835  xmlNodePtr adaptionset_supplementalproperty_node)
836 {
837  int32_t ret = 0;
838  DASHContext *c = s->priv_data;
839  struct representation *rep = NULL;
840  struct fragment *seg = NULL;
841  xmlNodePtr representation_segmenttemplate_node = NULL;
842  xmlNodePtr representation_baseurl_node = NULL;
843  xmlNodePtr representation_segmentlist_node = NULL;
844  xmlNodePtr segmentlists_tab[3];
845  xmlNodePtr fragment_timeline_node = NULL;
846  xmlNodePtr fragment_templates_tab[5];
847  char *val = NULL;
848  xmlNodePtr baseurl_nodes[4];
849  xmlNodePtr representation_node = node;
850  char *rep_bandwidth_val;
851  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
852 
853  // try to get the content type from the Representation
854  if (type == AVMEDIA_TYPE_UNKNOWN)
855  type = get_content_type(representation_node);
856  // try to get the content type from the ContentComponent
857  if (type == AVMEDIA_TYPE_UNKNOWN)
858  type = get_content_type(content_component_node);
859  // try to get the content type from the AdaptationSet
860  if (type == AVMEDIA_TYPE_UNKNOWN)
861  type = get_content_type(adaptionset_node);
862 
863  if (type == AVMEDIA_TYPE_UNKNOWN) {
864  av_log(s, AV_LOG_VERBOSE, "Parsing '%s' - skipping unsupported representation type\n", url);
865  return 0;
866  }
867 
868  // convert selected representation to our internal struct
869  rep = av_mallocz(sizeof(struct representation));
870  if (!rep)
871  return AVERROR(ENOMEM);
872  if (c->adaptionset_lang) {
873  rep->lang = av_strdup(c->adaptionset_lang);
874  if (!rep->lang) {
875  av_log(s, AV_LOG_ERROR, "alloc language memory failure\n");
876  av_freep(&rep);
877  return AVERROR(ENOMEM);
878  }
879  }
880  rep->parent = s;
881  representation_segmenttemplate_node = find_child_node_by_name(representation_node, "SegmentTemplate");
882  representation_baseurl_node = find_child_node_by_name(representation_node, "BaseURL");
883  representation_segmentlist_node = find_child_node_by_name(representation_node, "SegmentList");
884  rep_bandwidth_val = xmlGetProp(representation_node, "bandwidth");
885  val = xmlGetProp(representation_node, "id");
886  if (val) {
887  rep->id = av_strdup(val);
888  xmlFree(val);
889  if (!rep->id)
890  goto enomem;
891  }
892 
893  baseurl_nodes[0] = mpd_baseurl_node;
894  baseurl_nodes[1] = period_baseurl_node;
895  baseurl_nodes[2] = adaptionset_baseurl_node;
896  baseurl_nodes[3] = representation_baseurl_node;
897 
898  ret = resolve_content_path(s, url, &c->max_url_size, baseurl_nodes, 4);
899  c->max_url_size = aligned(c->max_url_size
900  + (rep->id ? strlen(rep->id) : 0)
901  + (rep_bandwidth_val ? strlen(rep_bandwidth_val) : 0));
902  if (ret == AVERROR(ENOMEM) || ret == 0)
903  goto free;
904  if (representation_segmenttemplate_node || fragment_template_node || period_segmenttemplate_node) {
905  fragment_timeline_node = NULL;
906  fragment_templates_tab[0] = representation_segmenttemplate_node;
907  fragment_templates_tab[1] = adaptionset_segmentlist_node;
908  fragment_templates_tab[2] = fragment_template_node;
909  fragment_templates_tab[3] = period_segmenttemplate_node;
910  fragment_templates_tab[4] = period_segmentlist_node;
911 
912  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "initialization");
913  if (val) {
914  rep->init_section = av_mallocz(sizeof(struct fragment));
915  if (!rep->init_section) {
916  xmlFree(val);
917  goto enomem;
918  }
919  c->max_url_size = aligned(c->max_url_size + strlen(val));
920  rep->init_section->url = get_content_url(baseurl_nodes, 4,
921  c->max_url_size, rep->id,
922  rep_bandwidth_val, val);
923  xmlFree(val);
924  if (!rep->init_section->url)
925  goto enomem;
926  rep->init_section->size = -1;
927  }
928  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "media");
929  if (val) {
930  c->max_url_size = aligned(c->max_url_size + strlen(val));
931  rep->url_template = get_content_url(baseurl_nodes, 4,
932  c->max_url_size, rep->id,
933  rep_bandwidth_val, val);
934  xmlFree(val);
935  }
936  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "presentationTimeOffset");
937  if (val) {
938  rep->presentation_timeoffset = (int64_t) strtoll(val, NULL, 10);
939  av_log(s, AV_LOG_TRACE, "rep->presentation_timeoffset = [%"PRId64"]\n", rep->presentation_timeoffset);
940  xmlFree(val);
941  }
942  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "duration");
943  if (val) {
944  rep->fragment_duration = (int64_t) strtoll(val, NULL, 10);
945  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
946  xmlFree(val);
947  }
948  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "timescale");
949  if (val) {
950  rep->fragment_timescale = (int64_t) strtoll(val, NULL, 10);
951  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
952  xmlFree(val);
953  }
954  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "startNumber");
955  if (val) {
956  rep->start_number = rep->first_seq_no = (int64_t) strtoll(val, NULL, 10);
957  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
958  xmlFree(val);
959  }
960  if (adaptionset_supplementalproperty_node) {
961  char *scheme_id_uri = xmlGetProp(adaptionset_supplementalproperty_node, "schemeIdUri");
962  if (scheme_id_uri) {
963  int is_last_segment_number = !av_strcasecmp(scheme_id_uri, "http://dashif.org/guidelines/last-segment-number");
964  xmlFree(scheme_id_uri);
965  if (is_last_segment_number) {
966  val = xmlGetProp(adaptionset_supplementalproperty_node, "value");
967  if (!val) {
968  av_log(s, AV_LOG_ERROR, "Missing value attribute in adaptionset_supplementalproperty_node\n");
969  } else {
970  rep->last_seq_no = (int64_t)strtoll(val, NULL, 10) - 1;
971  xmlFree(val);
972  }
973  }
974  }
975  }
976 
977  fragment_timeline_node = find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
978 
979  if (!fragment_timeline_node)
980  fragment_timeline_node = find_child_node_by_name(fragment_template_node, "SegmentTimeline");
981  if (!fragment_timeline_node)
982  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
983  if (!fragment_timeline_node)
984  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
985  if (fragment_timeline_node) {
986  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
987  while (fragment_timeline_node) {
988  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
989  if (ret < 0)
990  goto free;
991  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
992  }
993  }
994  } else if (representation_baseurl_node && !representation_segmentlist_node) {
995  seg = av_mallocz(sizeof(struct fragment));
996  if (!seg)
997  goto enomem;
998  ret = av_dynarray_add_nofree(&rep->fragments, &rep->n_fragments, seg);
999  if (ret < 0) {
1000  av_free(seg);
1001  goto free;
1002  }
1003  seg->url = get_content_url(baseurl_nodes, 4, c->max_url_size,
1004  rep->id, rep_bandwidth_val, NULL);
1005  if (!seg->url)
1006  goto enomem;
1007  seg->size = -1;
1008  } else if (representation_segmentlist_node) {
1009  // TODO: https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html
1010  // http://www-itec.uni-klu.ac.at/dash/ddash/mpdGenerator.php?fragmentlength=15&type=full
1011  xmlNodePtr fragmenturl_node = NULL;
1012  segmentlists_tab[0] = representation_segmentlist_node;
1013  segmentlists_tab[1] = adaptionset_segmentlist_node;
1014  segmentlists_tab[2] = period_segmentlist_node;
1015 
1016  val = get_val_from_nodes_tab(segmentlists_tab, 3, "duration");
1017  if (val) {
1018  rep->fragment_duration = (int64_t) strtoll(val, NULL, 10);
1019  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
1020  xmlFree(val);
1021  }
1022  val = get_val_from_nodes_tab(segmentlists_tab, 3, "timescale");
1023  if (val) {
1024  rep->fragment_timescale = (int64_t) strtoll(val, NULL, 10);
1025  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
1026  xmlFree(val);
1027  }
1028  val = get_val_from_nodes_tab(segmentlists_tab, 3, "startNumber");
1029  if (val) {
1030  rep->start_number = rep->first_seq_no = (int64_t) strtoll(val, NULL, 10);
1031  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
1032  xmlFree(val);
1033  }
1034 
1035  fragmenturl_node = xmlFirstElementChild(representation_segmentlist_node);
1036  while (fragmenturl_node) {
1037  ret = parse_manifest_segmenturlnode(s, rep, fragmenturl_node,
1038  baseurl_nodes, rep->id,
1039  rep_bandwidth_val);
1040  if (ret < 0)
1041  goto free;
1042  fragmenturl_node = xmlNextElementSibling(fragmenturl_node);
1043  }
1044 
1045  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
1046  if (!fragment_timeline_node)
1047  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
1048  if (fragment_timeline_node) {
1049  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
1050  while (fragment_timeline_node) {
1051  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
1052  if (ret < 0)
1053  goto free;
1054  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
1055  }
1056  }
1057  } else {
1058  av_log(s, AV_LOG_ERROR, "Unknown format of Representation node id '%s' \n",
1059  rep->id ? rep->id : "");
1060  goto free;
1061  }
1062 
1063  if (rep->fragment_duration > 0 && !rep->fragment_timescale)
1064  rep->fragment_timescale = 1;
1065  rep->bandwidth = rep_bandwidth_val ? atoi(rep_bandwidth_val) : 0;
1066  rep->framerate = av_make_q(0, 0);
1067  if (type == AVMEDIA_TYPE_VIDEO) {
1068  char *rep_framerate_val = xmlGetProp(representation_node, "frameRate");
1069  if (rep_framerate_val) {
1070  ret = av_parse_video_rate(&rep->framerate, rep_framerate_val);
1071  if (ret < 0)
1072  av_log(s, AV_LOG_VERBOSE, "Ignoring invalid frame rate '%s'\n", rep_framerate_val);
1073  xmlFree(rep_framerate_val);
1074  }
1075  }
1076 
1077  switch (type) {
1078  case AVMEDIA_TYPE_VIDEO:
1079  ret = av_dynarray_add_nofree(&c->videos, &c->n_videos, rep);
1080  break;
1081  case AVMEDIA_TYPE_AUDIO:
1082  ret = av_dynarray_add_nofree(&c->audios, &c->n_audios, rep);
1083  break;
1084  case AVMEDIA_TYPE_SUBTITLE:
1085  ret = av_dynarray_add_nofree(&c->subtitles, &c->n_subtitles, rep);
1086  break;
1087  }
1088  if (ret < 0)
1089  goto free;
1090 
1091 end:
1092  if (rep_bandwidth_val)
1093  xmlFree(rep_bandwidth_val);
1094 
1095  return ret;
1096 enomem:
1097  ret = AVERROR(ENOMEM);
1098 free:
1099  free_representation(rep);
1100  goto end;
1101 }
1102 
1103 static int parse_manifest_adaptationset_attr(AVFormatContext *s, xmlNodePtr adaptionset_node)
1104 {
1105  DASHContext *c = s->priv_data;
1106 
1107  if (!adaptionset_node) {
1108  av_log(s, AV_LOG_WARNING, "Cannot get AdaptionSet\n");
1109  return AVERROR(EINVAL);
1110  }
1111  c->adaptionset_lang = xmlGetProp(adaptionset_node, "lang");
1112 
1113  return 0;
1114 }
1115 
1116 static int parse_manifest_adaptationset(AVFormatContext *s, const char *url,
1117  xmlNodePtr adaptionset_node,
1118  xmlNodePtr mpd_baseurl_node,
1119  xmlNodePtr period_baseurl_node,
1120  xmlNodePtr period_segmenttemplate_node,
1121  xmlNodePtr period_segmentlist_node)
1122 {
1123  int ret = 0;
1124  DASHContext *c = s->priv_data;
1125  xmlNodePtr fragment_template_node = NULL;
1126  xmlNodePtr content_component_node = NULL;
1127  xmlNodePtr adaptionset_baseurl_node = NULL;
1128  xmlNodePtr adaptionset_segmentlist_node = NULL;
1129  xmlNodePtr adaptionset_supplementalproperty_node = NULL;
1130  xmlNodePtr node = NULL;
1131 
1132  ret = parse_manifest_adaptationset_attr(s, adaptionset_node);
1133  if (ret < 0)
1134  return ret;
1135 
1136  node = xmlFirstElementChild(adaptionset_node);
1137  while (node) {
1138  if (!av_strcasecmp(node->name, "SegmentTemplate")) {
1139  fragment_template_node = node;
1140  } else if (!av_strcasecmp(node->name, "ContentComponent")) {
1141  content_component_node = node;
1142  } else if (!av_strcasecmp(node->name, "BaseURL")) {
1143  adaptionset_baseurl_node = node;
1144  } else if (!av_strcasecmp(node->name, "SegmentList")) {
1145  adaptionset_segmentlist_node = node;
1146  } else if (!av_strcasecmp(node->name, "SupplementalProperty")) {
1147  adaptionset_supplementalproperty_node = node;
1148  } else if (!av_strcasecmp(node->name, "Representation")) {
1149  ret = parse_manifest_representation(s, url, node,
1150  adaptionset_node,
1151  mpd_baseurl_node,
1152  period_baseurl_node,
1153  period_segmenttemplate_node,
1154  period_segmentlist_node,
1155  fragment_template_node,
1156  content_component_node,
1157  adaptionset_baseurl_node,
1158  adaptionset_segmentlist_node,
1159  adaptionset_supplementalproperty_node);
1160  if (ret < 0)
1161  goto err;
1162  }
1163  node = xmlNextElementSibling(node);
1164  }
1165 
1166 err:
1167  xmlFree(c->adaptionset_lang);
1168  c->adaptionset_lang = NULL;
1169  return ret;
1170 }
1171 
1172 static int parse_programinformation(AVFormatContext *s, xmlNodePtr node)
1173 {
1174  xmlChar *val = NULL;
1175 
1176  node = xmlFirstElementChild(node);
1177  while (node) {
1178  if (!av_strcasecmp(node->name, "Title")) {
1179  val = xmlNodeGetContent(node);
1180  if (val) {
1181  av_dict_set(&s->metadata, "Title", val, 0);
1182  }
1183  } else if (!av_strcasecmp(node->name, "Source")) {
1184  val = xmlNodeGetContent(node);
1185  if (val) {
1186  av_dict_set(&s->metadata, "Source", val, 0);
1187  }
1188  } else if (!av_strcasecmp(node->name, "Copyright")) {
1189  val = xmlNodeGetContent(node);
1190  if (val) {
1191  av_dict_set(&s->metadata, "Copyright", val, 0);
1192  }
1193  }
1194  node = xmlNextElementSibling(node);
1195  xmlFree(val);
1196  val = NULL;
1197  }
1198  return 0;
1199 }
1200 
1201 static int parse_manifest(AVFormatContext *s, const char *url, AVIOContext *in)
1202 {
1203  DASHContext *c = s->priv_data;
1204  int ret = 0;
1205  int close_in = 0;
1206  AVBPrint buf;
1207  AVDictionary *opts = NULL;
1208  xmlDoc *doc = NULL;
1209  xmlNodePtr root_element = NULL;
1210  xmlNodePtr node = NULL;
1211  xmlNodePtr period_node = NULL;
1212  xmlNodePtr tmp_node = NULL;
1213  xmlNodePtr mpd_baseurl_node = NULL;
1214  xmlNodePtr period_baseurl_node = NULL;
1215  xmlNodePtr period_segmenttemplate_node = NULL;
1216  xmlNodePtr period_segmentlist_node = NULL;
1217  xmlNodePtr adaptionset_node = NULL;
1218  xmlAttrPtr attr = NULL;
1219  char *val = NULL;
1220  uint32_t period_duration_sec = 0;
1221  uint32_t period_start_sec = 0;
1222 
1223  if (!in) {
1224  close_in = 1;
1225 
1226  av_dict_copy(&opts, c->avio_opts, 0);
1227  ret = avio_open2(&in, url, AVIO_FLAG_READ, c->interrupt_callback, &opts);
1228  av_dict_free(&opts);
1229  if (ret < 0)
1230  return ret;
1231  }
1232 
1233  if (av_opt_get(in, "location", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&c->base_url) < 0)
1234  c->base_url = av_strdup(url);
1235 
1236  av_bprint_init(&buf, 0, INT_MAX); // xmlReadMemory uses integer bufsize
1237 
1238  if ((ret = avio_read_to_bprint(in, &buf, SIZE_MAX)) < 0 ||
1239  !avio_feof(in)) {
1240  av_log(s, AV_LOG_ERROR, "Unable to read to manifest '%s'\n", url);
1241  if (ret == 0)
1243  } else {
1244  LIBXML_TEST_VERSION
1245 
1246  doc = xmlReadMemory(buf.str, buf.len, c->base_url, NULL, 0);
1247  root_element = xmlDocGetRootElement(doc);
1248  node = root_element;
1249 
1250  if (!node) {
1251  ret = AVERROR_INVALIDDATA;
1252  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing root node\n", url);
1253  goto cleanup;
1254  }
1255 
1256  if (node->type != XML_ELEMENT_NODE ||
1257  av_strcasecmp(node->name, "MPD")) {
1259  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - wrong root node name[%s] type[%d]\n", url, node->name, (int)node->type);
1260  goto cleanup;
1261  }
1262 
1263  val = xmlGetProp(node, "type");
1264  if (!val) {
1265  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing type attrib\n", url);
1267  goto cleanup;
1268  }
1269  if (!av_strcasecmp(val, "dynamic"))
1270  c->is_live = 1;
1271  xmlFree(val);
1272 
1273  attr = node->properties;
1274  while (attr) {
1275  val = xmlGetProp(node, attr->name);
1276 
1277  if (!av_strcasecmp(attr->name, "availabilityStartTime")) {
1278  c->availability_start_time = get_utc_date_time_insec(s, val);
1279  av_log(s, AV_LOG_TRACE, "c->availability_start_time = [%"PRId64"]\n", c->availability_start_time);
1280  } else if (!av_strcasecmp(attr->name, "availabilityEndTime")) {
1281  c->availability_end_time = get_utc_date_time_insec(s, val);
1282  av_log(s, AV_LOG_TRACE, "c->availability_end_time = [%"PRId64"]\n", c->availability_end_time);
1283  } else if (!av_strcasecmp(attr->name, "publishTime")) {
1284  c->publish_time = get_utc_date_time_insec(s, val);
1285  av_log(s, AV_LOG_TRACE, "c->publish_time = [%"PRId64"]\n", c->publish_time);
1286  } else if (!av_strcasecmp(attr->name, "minimumUpdatePeriod")) {
1287  c->minimum_update_period = get_duration_insec(s, val);
1288  av_log(s, AV_LOG_TRACE, "c->minimum_update_period = [%"PRId64"]\n", c->minimum_update_period);
1289  } else if (!av_strcasecmp(attr->name, "timeShiftBufferDepth")) {
1290  c->time_shift_buffer_depth = get_duration_insec(s, val);
1291  av_log(s, AV_LOG_TRACE, "c->time_shift_buffer_depth = [%"PRId64"]\n", c->time_shift_buffer_depth);
1292  } else if (!av_strcasecmp(attr->name, "minBufferTime")) {
1293  c->min_buffer_time = get_duration_insec(s, val);
1294  av_log(s, AV_LOG_TRACE, "c->min_buffer_time = [%"PRId64"]\n", c->min_buffer_time);
1295  } else if (!av_strcasecmp(attr->name, "suggestedPresentationDelay")) {
1296  c->suggested_presentation_delay = get_duration_insec(s, val);
1297  av_log(s, AV_LOG_TRACE, "c->suggested_presentation_delay = [%"PRId64"]\n", c->suggested_presentation_delay);
1298  } else if (!av_strcasecmp(attr->name, "mediaPresentationDuration")) {
1299  c->media_presentation_duration = get_duration_insec(s, val);
1300  av_log(s, AV_LOG_TRACE, "c->media_presentation_duration = [%"PRId64"]\n", c->media_presentation_duration);
1301  }
1302  attr = attr->next;
1303  xmlFree(val);
1304  }
1305 
1306  tmp_node = find_child_node_by_name(node, "BaseURL");
1307  if (tmp_node) {
1308  mpd_baseurl_node = xmlCopyNode(tmp_node,1);
1309  } else {
1310  mpd_baseurl_node = xmlNewNode(NULL, "BaseURL");
1311  }
1312 
1313  // for now we handle only one Period, the one with the longest duration
1314  node = xmlFirstElementChild(node);
1315  while (node) {
1316  if (!av_strcasecmp(node->name, "Period")) {
1317  period_duration_sec = 0;
1318  period_start_sec = 0;
1319  attr = node->properties;
1320  while (attr) {
1321  val = xmlGetProp(node, attr->name);
1322  if (!av_strcasecmp(attr->name, "duration")) {
1323  period_duration_sec = get_duration_insec(s, val);
1324  } else if (!av_strcasecmp(attr->name, "start")) {
1325  period_start_sec = get_duration_insec(s, val);
1326  }
1327  attr = attr->next;
1328  xmlFree(val);
1329  }
1330  if ((period_duration_sec) >= (c->period_duration)) {
1331  period_node = node;
1332  c->period_duration = period_duration_sec;
1333  c->period_start = period_start_sec;
1334  if (c->period_start > 0)
1335  c->media_presentation_duration = c->period_duration;
1336  }
1337  } else if (!av_strcasecmp(node->name, "ProgramInformation")) {
1338  parse_programinformation(s, node);
1339  }
1340  node = xmlNextElementSibling(node);
1341  }
1342  if (!period_node) {
1343  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing Period node\n", url);
1345  goto cleanup;
1346  }
1347 
1348  adaptionset_node = xmlFirstElementChild(period_node);
1349  while (adaptionset_node) {
1350  if (!av_strcasecmp(adaptionset_node->name, "BaseURL")) {
1351  period_baseurl_node = adaptionset_node;
1352  } else if (!av_strcasecmp(adaptionset_node->name, "SegmentTemplate")) {
1353  period_segmenttemplate_node = adaptionset_node;
1354  } else if (!av_strcasecmp(adaptionset_node->name, "SegmentList")) {
1355  period_segmentlist_node = adaptionset_node;
1356  } else if (!av_strcasecmp(adaptionset_node->name, "AdaptationSet")) {
1357  parse_manifest_adaptationset(s, url, adaptionset_node, mpd_baseurl_node, period_baseurl_node, period_segmenttemplate_node, period_segmentlist_node);
1358  }
1359  adaptionset_node = xmlNextElementSibling(adaptionset_node);
1360  }
1361 cleanup:
1362  /*free the document */
1363  xmlFreeDoc(doc);
1364  xmlCleanupParser();
1365  xmlFreeNode(mpd_baseurl_node);
1366  }
1367 
1368  av_bprint_finalize(&buf, NULL);
1369  if (close_in) {
1370  avio_close(in);
1371  }
1372  return ret;
1373 }
1374 
1375 static int64_t calc_cur_seg_no(AVFormatContext *s, struct representation *pls)
1376 {
1377  DASHContext *c = s->priv_data;
1378  int64_t num = 0;
1379  int64_t start_time_offset = 0;
1380 
1381  if (c->is_live) {
1382  if (pls->n_fragments) {
1383  av_log(s, AV_LOG_TRACE, "in n_fragments mode\n");
1384  num = pls->first_seq_no;
1385  } else if (pls->n_timelines) {
1386  av_log(s, AV_LOG_TRACE, "in n_timelines mode\n");
1387  start_time_offset = get_segment_start_time_based_on_timeline(pls, 0xFFFFFFFF) - 60 * pls->fragment_timescale; // 60 seconds before end
1388  num = calc_next_seg_no_from_timelines(pls, start_time_offset);
1389  if (num == -1)
1390  num = pls->first_seq_no;
1391  else
1392  num += pls->first_seq_no;
1393  } else if (pls->fragment_duration){
1394  av_log(s, AV_LOG_TRACE, "in fragment_duration mode fragment_timescale = %"PRId64", presentation_timeoffset = %"PRId64"\n", pls->fragment_timescale, pls->presentation_timeoffset);
1395  if (pls->presentation_timeoffset) {
1396  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) * pls->fragment_timescale)-pls->presentation_timeoffset) / pls->fragment_duration - c->min_buffer_time;
1397  } else if (c->publish_time > 0 && !c->availability_start_time) {
1398  if (c->min_buffer_time) {
1399  num = pls->first_seq_no + (((c->publish_time + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration - c->min_buffer_time;
1400  } else {
1401  num = pls->first_seq_no + (((c->publish_time - c->time_shift_buffer_depth + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1402  }
1403  } else {
1404  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1405  }
1406  }
1407  } else {
1408  num = pls->first_seq_no;
1409  }
1410  return num;
1411 }
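/*
 * In short: for live streams the starting segment number is derived from whichever
 * information the manifest provides, tried in this order: an explicit fragment list,
 * a SegmentTimeline (starting roughly 60 seconds before the live edge), or duration
 * arithmetic against availabilityStartTime / publishTime adjusted by
 * suggestedPresentationDelay and minBufferTime. For VOD it is simply first_seq_no.
 */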
1412 
1413 static int64_t calc_min_seg_no(AVFormatContext *s, struct representation *pls)
1414 {
1415  DASHContext *c = s->priv_data;
1416  int64_t num = 0;
1417 
1418  if (c->is_live && pls->fragment_duration) {
1419  av_log(s, AV_LOG_TRACE, "in live mode\n");
1420  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->time_shift_buffer_depth) * pls->fragment_timescale) / pls->fragment_duration;
1421  } else {
1422  num = pls->first_seq_no;
1423  }
1424  return num;
1425 }
1426 
1427 static int64_t calc_max_seg_no(struct representation *pls, DASHContext *c)
1428 {
1429  int64_t num = 0;
1430 
1431  if (pls->n_fragments) {
1432  num = pls->first_seq_no + pls->n_fragments - 1;
1433  } else if (pls->n_timelines) {
1434  int i = 0;
1435  num = pls->first_seq_no + pls->n_timelines - 1;
1436  for (i = 0; i < pls->n_timelines; i++) {
1437  if (pls->timelines[i]->repeat == -1) {
1438  int length_of_each_segment = pls->timelines[i]->duration / pls->fragment_timescale;
1439  num = c->period_duration / length_of_each_segment;
1440  } else {
1441  num += pls->timelines[i]->repeat;
1442  }
1443  }
1444  } else if (c->is_live && pls->fragment_duration) {
1445  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time)) * pls->fragment_timescale) / pls->fragment_duration;
1446  } else if (pls->fragment_duration) {
1447  num = pls->first_seq_no + av_rescale_rnd(1, c->media_presentation_duration * pls->fragment_timescale, pls->fragment_duration, AV_ROUND_UP);
1448  }
1449 
1450  return num;
1451 }
1452 
1453 static void move_timelines(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1454 {
1455  if (rep_dest && rep_src ) {
1456  free_timelines_list(rep_dest);
1457  rep_dest->timelines = rep_src->timelines;
1458  rep_dest->n_timelines = rep_src->n_timelines;
1459  rep_dest->first_seq_no = rep_src->first_seq_no;
1460  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1461  rep_src->timelines = NULL;
1462  rep_src->n_timelines = 0;
1463  rep_dest->cur_seq_no = rep_src->cur_seq_no;
1464  }
1465 }
1466 
1467 static void move_segments(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1468 {
1469  if (rep_dest && rep_src ) {
1470  free_fragment_list(rep_dest);
1471  if (rep_src->start_number > (rep_dest->start_number + rep_dest->n_fragments))
1472  rep_dest->cur_seq_no = 0;
1473  else
1474  rep_dest->cur_seq_no += rep_src->start_number - rep_dest->start_number;
1475  rep_dest->fragments = rep_src->fragments;
1476  rep_dest->n_fragments = rep_src->n_fragments;
1477  rep_dest->parent = rep_src->parent;
1478  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1479  rep_src->fragments = NULL;
1480  rep_src->n_fragments = 0;
1481  }
1482 }
1483 
1484 
1485 static int refresh_manifest(AVFormatContext *s)
1486 {
1487  int ret = 0, i;
1488  DASHContext *c = s->priv_data;
1489  // save current context
1490  int n_videos = c->n_videos;
1491  struct representation **videos = c->videos;
1492  int n_audios = c->n_audios;
1493  struct representation **audios = c->audios;
1494  int n_subtitles = c->n_subtitles;
1495  struct representation **subtitles = c->subtitles;
1496  char *base_url = c->base_url;
1497 
1498  c->base_url = NULL;
1499  c->n_videos = 0;
1500  c->videos = NULL;
1501  c->n_audios = 0;
1502  c->audios = NULL;
1503  c->n_subtitles = 0;
1504  c->subtitles = NULL;
1505  ret = parse_manifest(s, s->url, NULL);
1506  if (ret)
1507  goto finish;
1508 
1509  if (c->n_videos != n_videos) {
1511  "new manifest has mismatched no. of video representations, %d -> %d\n",
1512  n_videos, c->n_videos);
1513  return AVERROR_INVALIDDATA;
1514  }
1515  if (c->n_audios != n_audios) {
1517  "new manifest has mismatched no. of audio representations, %d -> %d\n",
1518  n_audios, c->n_audios);
1519  return AVERROR_INVALIDDATA;
1520  }
1521  if (c->n_subtitles != n_subtitles) {
1523  "new manifest has mismatched no. of subtitles representations, %d -> %d\n",
1524  n_subtitles, c->n_subtitles);
1525  return AVERROR_INVALIDDATA;
1526  }
1527 
1528  for (i = 0; i < n_videos; i++) {
1529  struct representation *cur_video = videos[i];
1530  struct representation *ccur_video = c->videos[i];
1531  if (cur_video->timelines) {
1532  // calc current time
1533  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_video, cur_video->cur_seq_no) / cur_video->fragment_timescale;
1534  // update segments
1535  ccur_video->cur_seq_no = calc_next_seg_no_from_timelines(ccur_video, currentTime * cur_video->fragment_timescale - 1);
1536  if (ccur_video->cur_seq_no >= 0) {
1537  move_timelines(ccur_video, cur_video, c);
1538  }
1539  }
1540  if (cur_video->fragments) {
1541  move_segments(ccur_video, cur_video, c);
1542  }
1543  }
1544  for (i = 0; i < n_audios; i++) {
1545  struct representation *cur_audio = audios[i];
1546  struct representation *ccur_audio = c->audios[i];
1547  if (cur_audio->timelines) {
1548  // calc current time
1549  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_audio, cur_audio->cur_seq_no) / cur_audio->fragment_timescale;
1550  // update segments
1551  ccur_audio->cur_seq_no = calc_next_seg_no_from_timelines(ccur_audio, currentTime * cur_audio->fragment_timescale - 1);
1552  if (ccur_audio->cur_seq_no >= 0) {
1553  move_timelines(ccur_audio, cur_audio, c);
1554  }
1555  }
1556  if (cur_audio->fragments) {
1557  move_segments(ccur_audio, cur_audio, c);
1558  }
1559  }
1560 
1561 finish:
1562  // restore context
1563  if (c->base_url)
1564  av_free(base_url);
1565  else
1566  c->base_url = base_url;
1567 
1568  if (c->subtitles)
1569  free_subtitle_list(c);
1570  if (c->audios)
1571  free_audio_list(c);
1572  if (c->videos)
1573  free_video_list(c);
1574 
1575  c->n_subtitles = n_subtitles;
1576  c->subtitles = subtitles;
1577  c->n_audios = n_audios;
1578  c->audios = audios;
1579  c->n_videos = n_videos;
1580  c->videos = videos;
1581  return ret;
1582 }
1583 
1584 static struct fragment *get_current_fragment(struct representation *pls)
1585 {
1586  int64_t min_seq_no = 0;
1587  int64_t max_seq_no = 0;
1588  struct fragment *seg = NULL;
1589  struct fragment *seg_ptr = NULL;
1590  DASHContext *c = pls->parent->priv_data;
1591 
1592  while (( !ff_check_interrupt(c->interrupt_callback)&& pls->n_fragments > 0)) {
1593  if (pls->cur_seq_no < pls->n_fragments) {
1594  seg_ptr = pls->fragments[pls->cur_seq_no];
1595  seg = av_mallocz(sizeof(struct fragment));
1596  if (!seg) {
1597  return NULL;
1598  }
1599  seg->url = av_strdup(seg_ptr->url);
1600  if (!seg->url) {
1601  av_free(seg);
1602  return NULL;
1603  }
1604  seg->size = seg_ptr->size;
1605  seg->url_offset = seg_ptr->url_offset;
1606  return seg;
1607  } else if (c->is_live) {
1608  refresh_manifest(pls->parent);
1609  } else {
1610  break;
1611  }
1612  }
1613  if (c->is_live) {
1614  min_seq_no = calc_min_seg_no(pls->parent, pls);
1615  max_seq_no = calc_max_seg_no(pls, c);
1616 
1617  if (pls->timelines || pls->fragments) {
1618  refresh_manifest(pls->parent);
1619  }
1620  if (pls->cur_seq_no <= min_seq_no) {
1621  av_log(pls->parent, AV_LOG_VERBOSE, "old fragment: cur[%"PRId64"] min[%"PRId64"] max[%"PRId64"]\n", (int64_t)pls->cur_seq_no, min_seq_no, max_seq_no);
1622  pls->cur_seq_no = calc_cur_seg_no(pls->parent, pls);
1623  } else if (pls->cur_seq_no > max_seq_no) {
1624  av_log(pls->parent, AV_LOG_VERBOSE, "new fragment: min[%"PRId64"] max[%"PRId64"]\n", min_seq_no, max_seq_no);
1625  }
1626  seg = av_mallocz(sizeof(struct fragment));
1627  if (!seg) {
1628  return NULL;
1629  }
1630  } else if (pls->cur_seq_no <= pls->last_seq_no) {
1631  seg = av_mallocz(sizeof(struct fragment));
1632  if (!seg) {
1633  return NULL;
1634  }
1635  }
1636  if (seg) {
1637  char *tmpfilename;
1638  if (!pls->url_template) {
1639  av_log(pls->parent, AV_LOG_ERROR, "Cannot get fragment, missing template URL\n");
1640  av_free(seg);
1641  return NULL;
1642  }
1643  tmpfilename = av_mallocz(c->max_url_size);
1644  if (!tmpfilename) {
1645  av_free(seg);
1646  return NULL;
1647  }
1648  ff_dash_fill_tmpl_params(tmpfilename, c->max_url_size, pls->url_template, 0, pls->cur_seq_no, 0, get_segment_start_time_based_on_timeline(pls, pls->cur_seq_no));
1649  seg->url = av_strireplace(pls->url_template, pls->url_template, tmpfilename);
1650  if (!seg->url) {
1651  av_log(pls->parent, AV_LOG_WARNING, "Unable to resolve template url '%s', try to use origin template\n", pls->url_template);
1652  seg->url = av_strdup(pls->url_template);
1653  if (!seg->url) {
1654  av_log(pls->parent, AV_LOG_ERROR, "Cannot resolve template url '%s'\n", pls->url_template);
1655  av_free(tmpfilename);
1656  av_free(seg);
1657  return NULL;
1658  }
1659  }
1660  av_free(tmpfilename);
1661  seg->size = -1;
1662  }
1663 
1664  return seg;
1665 }
1666 
1667 static int read_from_url(struct representation *pls, struct fragment *seg,
1668  uint8_t *buf, int buf_size)
1669 {
1670  int ret;
1671 
1672  /* limit read if the fragment was only a part of a file */
1673  if (seg->size >= 0)
1674  buf_size = FFMIN(buf_size, pls->cur_seg_size - pls->cur_seg_offset);
1675 
1676  ret = avio_read(pls->input, buf, buf_size);
1677  if (ret > 0)
1678  pls->cur_seg_offset += ret;
1679 
1680  return ret;
1681 }
1682 
1683 static int open_input(DASHContext *c, struct representation *pls, struct fragment *seg)
1684 {
1685  AVDictionary *opts = NULL;
1686  char *url = NULL;
1687  int ret = 0;
1688 
1689  url = av_mallocz(c->max_url_size);
1690  if (!url) {
1691  ret = AVERROR(ENOMEM);
1692  goto cleanup;
1693  }
1694 
1695  if (seg->size >= 0) {
1696  /* try to restrict the HTTP request to the part we want
1697  * (if this is in fact a HTTP request) */
1698  av_dict_set_int(&opts, "offset", seg->url_offset, 0);
1699  av_dict_set_int(&opts, "end_offset", seg->url_offset + seg->size, 0);
1700  }
1701 
1702  ff_make_absolute_url(url, c->max_url_size, c->base_url, seg->url);
1703  av_log(pls->parent, AV_LOG_VERBOSE, "DASH request for url '%s', offset %"PRId64"\n",
1704  url, seg->url_offset);
1705  ret = open_url(pls->parent, &pls->input, url, &c->avio_opts, opts, NULL);
1706 
1707 cleanup:
1708  av_free(url);
1709  av_dict_free(&opts);
1710  pls->cur_seg_offset = 0;
1711  pls->cur_seg_size = seg->size;
1712  return ret;
1713 }
1714 
1715 static int update_init_section(struct representation *pls)
1716 {
1717  static const int max_init_section_size = 1024 * 1024;
1718  DASHContext *c = pls->parent->priv_data;
1719  int64_t sec_size;
1720  int64_t urlsize;
1721  int ret;
1722 
1723  if (!pls->init_section || pls->init_sec_buf)
1724  return 0;
1725 
1726  ret = open_input(c, pls, pls->init_section);
1727  if (ret < 0) {
1729  "Failed to open an initialization section\n");
1730  return ret;
1731  }
1732 
1733  if (pls->init_section->size >= 0)
1734  sec_size = pls->init_section->size;
1735  else if ((urlsize = avio_size(pls->input)) >= 0)
1736  sec_size = urlsize;
1737  else
1738  sec_size = max_init_section_size;
1739 
1740  av_log(pls->parent, AV_LOG_DEBUG,
1741  "Downloading an initialization section of size %"PRId64"\n",
1742  sec_size);
1743 
1744  sec_size = FFMIN(sec_size, max_init_section_size);
1745 
1746  av_fast_malloc(&pls->init_sec_buf, &pls->init_sec_buf_size, sec_size);
1747 
1748  ret = read_from_url(pls, pls->init_section, pls->init_sec_buf,
1749  pls->init_sec_buf_size);
1750  ff_format_io_close(pls->parent, &pls->input);
1751 
1752  if (ret < 0)
1753  return ret;
1754 
1755  pls->init_sec_data_len = ret;
1756  pls->init_sec_buf_read_offset = 0;
1757 
1758  return 0;
1759 }
1760 
1761 static int64_t seek_data(void *opaque, int64_t offset, int whence)
1762 {
1763  struct representation *v = opaque;
1764  if (v->n_fragments && !v->init_sec_data_len) {
1765  return avio_seek(v->input, offset, whence);
1766  }
1767 
1768  return AVERROR(ENOSYS);
1769 }
1770 
1771 static int read_data(void *opaque, uint8_t *buf, int buf_size)
1772 {
1773  int ret = 0;
1774  struct representation *v = opaque;
1775  DASHContext *c = v->parent->priv_data;
1776 
1777 restart:
1778  if (!v->input) {
1779  free_fragment(&v->cur_seg);
1780  v->cur_seg = get_current_fragment(v);
1781  if (!v->cur_seg) {
1782  ret = AVERROR_EOF;
1783  goto end;
1784  }
1785 
1786  /* load/update Media Initialization Section, if any */
1787  ret = update_init_section(v);
1788  if (ret)
1789  goto end;
1790 
1791  ret = open_input(c, v, v->cur_seg);
1792  if (ret < 0) {
1793  if (ff_check_interrupt(c->interrupt_callback)) {
1794  ret = AVERROR_EXIT;
1795  goto end;
1796  }
1797  av_log(v->parent, AV_LOG_WARNING, "Failed to open fragment of playlist\n");
1798  v->cur_seq_no++;
1799  goto restart;
1800  }
1801  }
1802 
1803  if (v->init_sec_buf_read_offset < v->init_sec_data_len) {
1804  /* Push init section out first before first actual fragment */
1805  int copy_size = FFMIN(v->init_sec_data_len - v->init_sec_buf_read_offset, buf_size);
1806  memcpy(buf, v->init_sec_buf, copy_size);
1807  v->init_sec_buf_read_offset += copy_size;
1808  ret = copy_size;
1809  goto end;
1810  }
1811 
1812  /* if v->cur_seg is NULL, fetch the current fragment and re-check it */
1813  if (!v->cur_seg) {
1814  v->cur_seg = get_current_fragment(v);
1815  }
1816  if (!v->cur_seg) {
1817  ret = AVERROR_EOF;
1818  goto end;
1819  }
1820  ret = read_from_url(v, v->cur_seg, buf, buf_size);
1821  if (ret > 0)
1822  goto end;
1823 
1824  if (c->is_live || v->cur_seq_no < v->last_seq_no) {
1825  if (!v->is_restart_needed)
1826  v->cur_seq_no++;
1827  v->is_restart_needed = 1;
1828  }
1829 
1830 end:
1831  return ret;
1832 }
1833 
1834 static int nested_io_open(AVFormatContext *s, AVIOContext **pb, const char *url,
1835  int flags, AVDictionary **opts)
1836 {
1838  "A DASH playlist item '%s' referred to an external file '%s'. "
1839  "Opening this file was forbidden for security reasons\n",
1840  s->url, url);
1841  return AVERROR(EPERM);
1842 }
1843 
1844 static void close_demux_for_component(struct representation *pls)
1845 {
1846  /* note: the internal buffer could have changed */
1847  av_freep(&pls->pb.pub.buffer);
1848  memset(&pls->pb, 0x00, sizeof(pls->pb));
1849  pls->ctx->pb = NULL;
1850  avformat_close_input(&pls->ctx);
1851 }
1852 
1853 static int reopen_demux_for_component(AVFormatContext *s, struct representation *pls)
1854 {
1855  DASHContext *c = s->priv_data;
1856  const AVInputFormat *in_fmt = NULL;
1857  AVDictionary *in_fmt_opts = NULL;
1858  uint8_t *avio_ctx_buffer = NULL;
1859  int ret = 0, i;
1860 
1861  if (pls->ctx) {
1862  close_demux_for_component(pls);
1863  }
1864 
1865  if (ff_check_interrupt(&s->interrupt_callback)) {
1866  ret = AVERROR_EXIT;
1867  goto fail;
1868  }
1869 
1870  if (!(pls->ctx = avformat_alloc_context())) {
1871  ret = AVERROR(ENOMEM);
1872  goto fail;
1873  }
1874 
1875  avio_ctx_buffer = av_malloc(INITIAL_BUFFER_SIZE);
1876  if (!avio_ctx_buffer) {
1877  ret = AVERROR(ENOMEM);
1878  avformat_free_context(pls->ctx);
1879  pls->ctx = NULL;
1880  goto fail;
1881  }
1882  ffio_init_context(&pls->pb, avio_ctx_buffer, INITIAL_BUFFER_SIZE, 0,
1883  pls, read_data, NULL, c->is_live ? NULL : seek_data);
1884  pls->pb.pub.seekable = 0;
1885 
1886  if ((ret = ff_copy_whiteblacklists(pls->ctx, s)) < 0)
1887  goto fail;
1888 
1889  pls->ctx->flags = AVFMT_FLAG_CUSTOM_IO;
1890  pls->ctx->probesize = s->probesize > 0 ? s->probesize : 1024 * 4;
1891  pls->ctx->max_analyze_duration = s->max_analyze_duration > 0 ? s->max_analyze_duration : 4 * AV_TIME_BASE;
1892  pls->ctx->interrupt_callback = s->interrupt_callback;
1893  ret = av_probe_input_buffer(&pls->pb.pub, &in_fmt, "", NULL, 0, 0);
1894  if (ret < 0) {
1895  av_log(s, AV_LOG_ERROR, "Error when loading first fragment of playlist\n");
1896  avformat_free_context(pls->ctx);
1897  pls->ctx = NULL;
1898  goto fail;
1899  }
1900 
1901  pls->ctx->pb = &pls->pb.pub;
1902  pls->ctx->io_open = nested_io_open;
1903 
1904  if (c->cenc_decryption_key)
1905  av_dict_set(&in_fmt_opts, "decryption_key", c->cenc_decryption_key, 0);
1906 
1907  // provide additional information from mpd if available
1908  ret = avformat_open_input(&pls->ctx, "", in_fmt, &in_fmt_opts); //pls->init_section->url
1909  av_dict_free(&in_fmt_opts);
1910  if (ret < 0)
1911  goto fail;
1912  if (pls->n_fragments) {
1913 #if FF_API_R_FRAME_RATE
1914  if (pls->framerate.den) {
1915  for (i = 0; i < pls->ctx->nb_streams; i++)
1916  pls->ctx->streams[i]->r_frame_rate = pls->framerate;
1917  }
1918 #endif
1919  ret = avformat_find_stream_info(pls->ctx, NULL);
1920  if (ret < 0)
1921  goto fail;
1922  }
1923 
1924 fail:
1925  return ret;
1926 }
1927 
1928 static int open_demux_for_component(AVFormatContext *s, struct representation *pls)
1929 {
1930  int ret = 0;
1931  int i;
1932 
1933  pls->parent = s;
1934  pls->cur_seq_no = calc_cur_seg_no(s, pls);
1935 
1936  if (!pls->last_seq_no) {
1937  pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
1938  }
1939 
1940  ret = reopen_demux_for_component(s, pls);
1941  if (ret < 0) {
1942  goto fail;
1943  }
1944  for (i = 0; i < pls->ctx->nb_streams; i++) {
1945  AVStream *st = avformat_new_stream(s, NULL);
1946  AVStream *ist = pls->ctx->streams[i];
1947  if (!st) {
1948  ret = AVERROR(ENOMEM);
1949  goto fail;
1950  }
1951  st->id = i;
1952  avcodec_parameters_copy(st->codecpar, ist->codecpar);
1953  avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
1954 
1955  // copy disposition
1956  st->disposition = ist->disposition;
1957  }
1958 
1959  return 0;
1960 fail:
1961  return ret;
1962 }
1963 
1964 static int is_common_init_section_exist(struct representation **pls, int n_pls)
1965 {
1966  struct fragment *first_init_section = pls[0]->init_section;
1967  char *url = NULL;
1968  int64_t url_offset = -1;
1969  int64_t size = -1;
1970  int i = 0;
1971 
1972  if (first_init_section == NULL || n_pls == 0)
1973  return 0;
1974 
1975  url = first_init_section->url;
1976  url_offset = first_init_section->url_offset;
1977  size = pls[0]->init_section->size;
1978  for (i = 0; i < n_pls; i++) {
1979  if (!pls[i]->init_section)
1980  continue;
1981 
1982  if (av_strcasecmp(pls[i]->init_section->url, url) ||
1983  pls[i]->init_section->url_offset != url_offset ||
1984  pls[i]->init_section->size != size) {
1985  return 0;
1986  }
1987  }
1988  return 1;
1989 }
1990 
1991 static int copy_init_section(struct representation *rep_dest, struct representation *rep_src)
1992 {
1993  rep_dest->init_sec_buf = av_mallocz(rep_src->init_sec_buf_size);
1994  if (!rep_dest->init_sec_buf) {
1995  av_log(rep_dest->ctx, AV_LOG_WARNING, "Cannot alloc memory for init_sec_buf\n");
1996  return AVERROR(ENOMEM);
1997  }
1998  memcpy(rep_dest->init_sec_buf, rep_src->init_sec_buf, rep_src->init_sec_data_len);
1999  rep_dest->init_sec_buf_size = rep_src->init_sec_buf_size;
2000  rep_dest->init_sec_data_len = rep_src->init_sec_data_len;
2001  rep_dest->cur_timestamp = rep_src->cur_timestamp;
2002 
2003  return 0;
2004 }
2005 
2006 static void move_metadata(AVStream *st, const char *key, char **value)
2007 {
2008  if (*value) {
2009  av_dict_set(&st->metadata, key, *value, AV_DICT_DONT_STRDUP_VAL);
2010  *value = NULL;
2011  }
2012 }
2013 
2014 static int dash_read_header(AVFormatContext *s)
2015 {
2016  DASHContext *c = s->priv_data;
2017  struct representation *rep;
2018  AVProgram *program;
2019  int ret = 0;
2020  int stream_index = 0;
2021  int i;
2022 
2023  c->interrupt_callback = &s->interrupt_callback;
2024 
2025  if ((ret = ffio_copy_url_options(s->pb, &c->avio_opts)) < 0)
2026  return ret;
2027 
2028  if ((ret = parse_manifest(s, s->url, s->pb)) < 0)
2029  return ret;
2030 
2031  /* If this isn't a live stream, fill the total duration of the
2032  * stream. */
2033  if (!c->is_live) {
2034  s->duration = (int64_t) c->media_presentation_duration * AV_TIME_BASE;
2035  } else {
2036  av_dict_set(&c->avio_opts, "seekable", "0", 0);
2037  }
2038 
2039  if (c->n_videos)
2040  c->is_init_section_common_video = is_common_init_section_exist(c->videos, c->n_videos);
2041 
2042  /* Open the demuxer for video and audio components if available */
2043  for (i = 0; i < c->n_videos; i++) {
2044  rep = c->videos[i];
2045  if (i > 0 && c->is_init_section_common_video) {
2046  ret = copy_init_section(rep, c->videos[0]);
2047  if (ret < 0)
2048  return ret;
2049  }
2050  ret = open_demux_for_component(s, rep);
2051 
2052  if (ret)
2053  return ret;
2054  rep->stream_index = stream_index;
2055  ++stream_index;
2056  }
2057 
2058  if (c->n_audios)
2059  c->is_init_section_common_audio = is_common_init_section_exist(c->audios, c->n_audios);
2060 
2061  for (i = 0; i < c->n_audios; i++) {
2062  rep = c->audios[i];
2063  if (i > 0 && c->is_init_section_common_audio) {
2064  ret = copy_init_section(rep, c->audios[0]);
2065  if (ret < 0)
2066  return ret;
2067  }
2068  ret = open_demux_for_component(s, rep);
2069 
2070  if (ret)
2071  return ret;
2072  rep->stream_index = stream_index;
2073  ++stream_index;
2074  }
2075 
2076  if (c->n_subtitles)
2077  c->is_init_section_common_subtitle = is_common_init_section_exist(c->subtitles, c->n_subtitles);
2078 
2079  for (i = 0; i < c->n_subtitles; i++) {
2080  rep = c->subtitles[i];
2081  if (i > 0 && c->is_init_section_common_subtitle) {
2082  ret = copy_init_section(rep, c->subtitles[0]);
2083  if (ret < 0)
2084  return ret;
2085  }
2086  ret = open_demux_for_component(s, rep);
2087 
2088  if (ret)
2089  return ret;
2090  rep->stream_index = stream_index;
2091  ++stream_index;
2092  }
2093 
2094  if (!stream_index)
2095  return AVERROR_INVALIDDATA;
2096 
2097  /* Create a program */
2098  program = av_new_program(s, 0);
2099  if (!program)
2100  return AVERROR(ENOMEM);
2101 
2102  for (i = 0; i < c->n_videos; i++) {
2103  rep = c->videos[i];
2104  av_program_add_stream_index(s, 0, rep->stream_index);
2105  rep->assoc_stream = s->streams[rep->stream_index];
2106  if (rep->bandwidth > 0)
2107  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2108  move_metadata(rep->assoc_stream, "id", &rep->id);
2109  }
2110  for (i = 0; i < c->n_audios; i++) {
2111  rep = c->audios[i];
2112  av_program_add_stream_index(s, 0, rep->stream_index);
2113  rep->assoc_stream = s->streams[rep->stream_index];
2114  if (rep->bandwidth > 0)
2115  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2116  move_metadata(rep->assoc_stream, "id", &rep->id);
2117  move_metadata(rep->assoc_stream, "language", &rep->lang);
2118  }
2119  for (i = 0; i < c->n_subtitles; i++) {
2120  rep = c->subtitles[i];
2121  av_program_add_stream_index(s, 0, rep->stream_index);
2122  rep->assoc_stream = s->streams[rep->stream_index];
2123  move_metadata(rep->assoc_stream, "id", &rep->id);
2124  move_metadata(rep->assoc_stream, "language", &rep->lang);
2125  }
2126 
2127  return 0;
2128 }
2129 
2130 static void recheck_discard_flags(AVFormatContext *s, struct representation **p, int n)
2131 {
2132  int i, j;
2133 
2134  for (i = 0; i < n; i++) {
2135  struct representation *pls = p[i];
2136  int needed = !pls->assoc_stream || pls->assoc_stream->discard < AVDISCARD_ALL;
2137 
2138  if (needed && !pls->ctx) {
2139  pls->cur_seg_offset = 0;
2140  pls->init_sec_buf_read_offset = 0;
2141  /* Catch up */
2142  for (j = 0; j < n; j++) {
2143  pls->cur_seq_no = FFMAX(pls->cur_seq_no, p[j]->cur_seq_no);
2144  }
2145  reopen_demux_for_component(s, pls);
2146  av_log(s, AV_LOG_INFO, "Now receiving stream_index %d\n", pls->stream_index);
2147  } else if (!needed && pls->ctx) {
2148  close_demux_for_component(pls);
2149  ff_format_io_close(pls->parent, &pls->input);
2150  av_log(s, AV_LOG_INFO, "No longer receiving stream_index %d\n", pls->stream_index);
2151  }
2152  }
2153 }
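
recheck_discard_flags() opens or closes each representation's nested demuxer according to the discard flag of its associated output stream, so an application deselects a representation simply by setting AVDISCARD_ALL before the next av_read_frame() call. A hedged sketch; keep_only_stream() and the selection policy are illustrative:

/* Sketch: keep one stream of an opened DASH input; the others are closed by
 * recheck_discard_flags() on the next packet read. */
#include <libavformat/avformat.h>

static void keep_only_stream(AVFormatContext *s, unsigned keep_index)
{
    for (unsigned i = 0; i < s->nb_streams; i++)
        s->streams[i]->discard = i == keep_index ? AVDISCARD_DEFAULT
                                                 : AVDISCARD_ALL;
}
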
2154 
2155 static int dash_read_packet(AVFormatContext *s, AVPacket *pkt)
2156 {
2157  DASHContext *c = s->priv_data;
2158  int ret = 0, i;
2159  int64_t mints = 0;
2160  struct representation *cur = NULL;
2161  struct representation *rep = NULL;
2162 
2163  recheck_discard_flags(s, c->videos, c->n_videos);
2164  recheck_discard_flags(s, c->audios, c->n_audios);
2165  recheck_discard_flags(s, c->subtitles, c->n_subtitles);
2166 
2167  for (i = 0; i < c->n_videos; i++) {
2168  rep = c->videos[i];
2169  if (!rep->ctx)
2170  continue;
2171  if (!cur || rep->cur_timestamp < mints) {
2172  cur = rep;
2173  mints = rep->cur_timestamp;
2174  }
2175  }
2176  for (i = 0; i < c->n_audios; i++) {
2177  rep = c->audios[i];
2178  if (!rep->ctx)
2179  continue;
2180  if (!cur || rep->cur_timestamp < mints) {
2181  cur = rep;
2182  mints = rep->cur_timestamp;
2183  }
2184  }
2185 
2186  for (i = 0; i < c->n_subtitles; i++) {
2187  rep = c->subtitles[i];
2188  if (!rep->ctx)
2189  continue;
2190  if (!cur || rep->cur_timestamp < mints) {
2191  cur = rep;
2192  mints = rep->cur_timestamp;
2193  }
2194  }
2195 
2196  if (!cur) {
2197  return AVERROR_INVALIDDATA;
2198  }
2199  while (!ff_check_interrupt(c->interrupt_callback) && !ret) {
2200  ret = av_read_frame(cur->ctx, pkt);
2201  if (ret >= 0) {
2202  /* If we got a packet, return it */
2203  cur->cur_timestamp = av_rescale(pkt->pts, (int64_t)cur->ctx->streams[0]->time_base.num * 90000, cur->ctx->streams[0]->time_base.den);
2204  pkt->stream_index = cur->stream_index;
2205  return 0;
2206  }
2207  if (cur->is_restart_needed) {
2208  cur->cur_seg_offset = 0;
2209  cur->init_sec_buf_read_offset = 0;
2210  ff_format_io_close(cur->parent, &cur->input);
2211  ret = reopen_demux_for_component(s, cur);
2212  cur->is_restart_needed = 0;
2213  }
2214  }
2215  return AVERROR_EOF;
2216 }
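
dash_read_packet() always reads from the representation whose last timestamp, rescaled to a common 90 kHz clock, is smallest, which keeps the selected components roughly interleaved. A worked example of that rescale with illustrative values:

/* 1 second of audio at a 1/48000 time base is 90000 ticks on the 90 kHz clock. */
#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

int main(void)
{
    int64_t pts = 48000;              /* hypothetical packet pts */
    AVRational tb = { 1, 48000 };     /* hypothetical stream time base */
    int64_t ts90k = av_rescale(pts, (int64_t)tb.num * 90000, tb.den);

    printf("%"PRId64"\n", ts90k);     /* prints 90000 */
    return 0;
}
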
2217 
2218 static int dash_close(AVFormatContext *s)
2219 {
2220  DASHContext *c = s->priv_data;
2221  free_audio_list(c);
2222  free_video_list(c);
2223  free_subtitle_list(c);
2224  av_dict_free(&c->avio_opts);
2225  av_freep(&c->base_url);
2226  return 0;
2227 }
2228 
2229 static int dash_seek(AVFormatContext *s, struct representation *pls, int64_t seek_pos_msec, int flags, int dry_run)
2230 {
2231  int ret = 0;
2232  int i = 0;
2233  int j = 0;
2234  int64_t duration = 0;
2235 
2236  av_log(pls->parent, AV_LOG_VERBOSE, "DASH seek pos[%"PRId64"ms] %s\n",
2237  seek_pos_msec, dry_run ? " (dry)" : "");
2238 
2239  // single fragment mode
2240  if (pls->n_fragments == 1) {
2241  pls->cur_timestamp = 0;
2242  pls->cur_seg_offset = 0;
2243  if (dry_run)
2244  return 0;
2245  ff_read_frame_flush(pls->ctx);
2246  return av_seek_frame(pls->ctx, -1, seek_pos_msec * 1000, flags);
2247  }
2248 
2249  ff_format_io_close(pls->parent, &pls->input);
2250 
2251  // find the nearest fragment
2252  if (pls->n_timelines > 0 && pls->fragment_timescale > 0) {
2253  int64_t num = pls->first_seq_no;
2254  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline start n_timelines[%d] "
2255  "last_seq_no[%"PRId64"].\n",
2256  (int)pls->n_timelines, (int64_t)pls->last_seq_no);
2257  for (i = 0; i < pls->n_timelines; i++) {
2258  if (pls->timelines[i]->starttime > 0) {
2259  duration = pls->timelines[i]->starttime;
2260  }
2261  duration += pls->timelines[i]->duration;
2262  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2263  goto set_seq_num;
2264  }
2265  for (j = 0; j < pls->timelines[i]->repeat; j++) {
2266  duration += pls->timelines[i]->duration;
2267  num++;
2268  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2269  goto set_seq_num;
2270  }
2271  }
2272  num++;
2273  }
2274 
2275 set_seq_num:
2276  pls->cur_seq_no = num > pls->last_seq_no ? pls->last_seq_no : num;
2277  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline end cur_seq_no[%"PRId64"].\n",
2278  (int64_t)pls->cur_seq_no);
2279  } else if (pls->fragment_duration > 0) {
2280  pls->cur_seq_no = pls->first_seq_no + ((seek_pos_msec * pls->fragment_timescale) / pls->fragment_duration) / 1000;
2281  } else {
2282  av_log(pls->parent, AV_LOG_ERROR, "dash_seek missing timeline or fragment_duration\n");
2283  pls->cur_seq_no = pls->first_seq_no;
2284  }
2285  pls->cur_timestamp = 0;
2286  pls->cur_seg_offset = 0;
2287  pls->init_sec_buf_read_offset = 0;
2288  ret = dry_run ? 0 : reopen_demux_for_component(s, pls);
2289 
2290  return ret;
2291 }
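
The SegmentTimeline branch above accumulates each S element's @duration (plus @repeat extra copies of it) in @timescale units until the accumulated end time passes the requested position. A standalone sketch of that walk for a single hypothetical S element (timescale 90000, duration 180000 ticks = 2 s, repeat 2, i.e. three segments); timeline_seg_no() is illustrative:

/* Map a millisecond seek position to a segment number; values are illustrative. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int64_t timeline_seg_no(int64_t seek_ms, int64_t first_seq_no,
                               int64_t timescale, int64_t starttime,
                               int64_t duration, int64_t repeat)
{
    int64_t num = first_seq_no;
    int64_t end = starttime + duration;          /* end of the first segment */

    for (int64_t r = 0; r <= repeat; r++, end += duration, num++)
        if (seek_ms < end * 1000 / timescale)
            return num;
    return num;                                  /* caller clamps to last_seq_no */
}

int main(void)
{
    /* 3000 ms falls inside the second 2-second segment -> prints 1 */
    printf("%"PRId64"\n", timeline_seg_no(3000, 0, 90000, 0, 180000, 2));
    return 0;
}
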
2292 
2293 static int dash_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2294 {
2295  int ret = 0, i;
2296  DASHContext *c = s->priv_data;
2297  int64_t seek_pos_msec = av_rescale_rnd(timestamp, 1000,
2298  s->streams[stream_index]->time_base.den,
2299  flags & AVSEEK_FLAG_BACKWARD ?
2300  AV_ROUND_DOWN : AV_ROUND_UP);
2301  if ((flags & AVSEEK_FLAG_BYTE) || c->is_live)
2302  return AVERROR(ENOSYS);
2303 
2304  /* Seek in discarded streams with dry_run=1 to avoid reopening them */
2305  for (i = 0; i < c->n_videos; i++) {
2306  if (!ret)
2307  ret = dash_seek(s, c->videos[i], seek_pos_msec, flags, !c->videos[i]->ctx);
2308  }
2309  for (i = 0; i < c->n_audios; i++) {
2310  if (!ret)
2311  ret = dash_seek(s, c->audios[i], seek_pos_msec, flags, !c->audios[i]->ctx);
2312  }
2313  for (i = 0; i < c->n_subtitles; i++) {
2314  if (!ret)
2315  ret = dash_seek(s, c->subtitles[i], seek_pos_msec, flags, !c->subtitles[i]->ctx);
2316  }
2317 
2318  return ret;
2319 }
2320 
2321 static int dash_probe(const AVProbeData *p)
2322 {
2323  if (!av_stristr(p->buf, "<MPD"))
2324  return 0;
2325 
2326  if (av_stristr(p->buf, "dash:profile:isoff-on-demand:2011") ||
2327  av_stristr(p->buf, "dash:profile:isoff-live:2011") ||
2328  av_stristr(p->buf, "dash:profile:isoff-live:2012") ||
2329  av_stristr(p->buf, "dash:profile:isoff-main:2011") ||
2330  av_stristr(p->buf, "3GPP:PSS:profile:DASH1")) {
2331  return AVPROBE_SCORE_MAX;
2332  }
2333  if (av_stristr(p->buf, "dash:profile")) {
2334  return AVPROBE_SCORE_MAX;
2335  }
2336 
2337  return 0;
2338 }
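
For reference, a hypothetical manifest that the probe above would accept with AVPROBE_SCORE_MAX, since it contains both "<MPD" and a "dash:profile" URN:

/* Hypothetical probe input, not taken from any real stream. */
static const char sample_mpd[] =
    "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
    "<MPD xmlns=\"urn:mpeg:dash:schema:mpd:2011\"\n"
    "     profiles=\"urn:mpeg:dash:profile:isoff-live:2011\">\n"
    "</MPD>\n";
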
2339 
2340 #define OFFSET(x) offsetof(DASHContext, x)
2341 #define FLAGS AV_OPT_FLAG_DECODING_PARAM
2342 static const AVOption dash_options[] = {
2343  {"allowed_extensions", "List of file extensions that dash is allowed to access",
2344  OFFSET(allowed_extensions), AV_OPT_TYPE_STRING,
2345  {.str = "aac,m4a,m4s,m4v,mov,mp4,webm,ts"},
2346  INT_MIN, INT_MAX, FLAGS},
2347  { "cenc_decryption_key", "Media decryption key (hex)", OFFSET(cenc_decryption_key), AV_OPT_TYPE_STRING, {.str = NULL}, INT_MIN, INT_MAX, .flags = FLAGS },
2348  {NULL}
2349 };
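
Both entries are ordinary decoding-parameter AVOptions, so callers can pass them through the options dictionary of avformat_open_input(). A sketch; the URL and option values are placeholders:

/* Sketch: forwarding dash demuxer private options at open time. */
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static int open_with_options(AVFormatContext **fmt, const char *mpd_url)
{
    AVDictionary *opts = NULL;
    int ret;

    /* placeholder values: extend the extension whitelist, set a CENC key */
    av_dict_set(&opts, "allowed_extensions", "aac,m4a,m4s,m4v,mov,mp4,webm,ts,mpd", 0);
    av_dict_set(&opts, "cenc_decryption_key", "00112233445566778899aabbccddeeff", 0);

    ret = avformat_open_input(fmt, mpd_url, NULL, &opts);
    av_dict_free(&opts);   /* entries the demuxer did not consume remain here */
    return ret;
}
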
2350 
2351 static const AVClass dash_class = {
2352  .class_name = "dash",
2353  .item_name = av_default_item_name,
2354  .option = dash_options,
2355  .version = LIBAVUTIL_VERSION_INT,
2356 };
2357 
2358 const FFInputFormat ff_dash_demuxer = {
2359  .p.name = "dash",
2360  .p.long_name = NULL_IF_CONFIG_SMALL("Dynamic Adaptive Streaming over HTTP"),
2361  .p.priv_class = &dash_class,
2362  .p.flags = AVFMT_NO_BYTE_SEEK,
2363  .priv_data_size = sizeof(DASHContext),
2364  .flags_internal = FF_INFMT_FLAG_INIT_CLEANUP,
2365  .read_probe = dash_probe,
2366  .read_header = dash_read_header,
2367  .read_packet = dash_read_packet,
2368  .read_close = dash_close,
2369  .read_seek = dash_read_seek,
2370 };
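
Because the demuxer registers the standard read_probe/read_header/read_packet/read_close/read_seek callbacks, a plain libavformat read loop is enough to consume it. A minimal sketch; dump_packets() and the manifest URL are illustrative:

/* Minimal consumer sketch: probing picks the dash demuxer automatically. */
#include <inttypes.h>
#include <libavformat/avformat.h>
#include <libavutil/log.h>

static int dump_packets(const char *mpd_url)
{
    AVFormatContext *fmt = NULL;
    AVPacket *pkt = av_packet_alloc();
    int ret;

    if (!pkt)
        return AVERROR(ENOMEM);
    if ((ret = avformat_open_input(&fmt, mpd_url, NULL, NULL)) < 0)
        goto end;
    if ((ret = avformat_find_stream_info(fmt, NULL)) < 0)
        goto end;
    while ((ret = av_read_frame(fmt, pkt)) >= 0) {
        av_log(NULL, AV_LOG_INFO, "stream %d pts %"PRId64"\n",
               pkt->stream_index, pkt->pts);
        av_packet_unref(pkt);
    }
end:
    avformat_close_input(&fmt);
    av_packet_free(&pkt);
    return ret == AVERROR_EOF ? 0 : ret;
}
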